diff --git a/googleAppEngine/scipy/env/lib/python2.7/UserDict.pyc b/googleAppEngine/scipy/env/lib/python2.7/UserDict.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74a349ca06489508a038474742b0c30ddf63f40c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/UserDict.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/_abcoll.pyc b/googleAppEngine/scipy/env/lib/python2.7/_abcoll.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7c9336694f3b68546d1078b407bd1205b828468 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/_abcoll.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/_weakrefset.pyc b/googleAppEngine/scipy/env/lib/python2.7/_weakrefset.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0feb35f1ed1a5340023c172080ae5a3fcc9deb27 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/_weakrefset.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/abc.pyc b/googleAppEngine/scipy/env/lib/python2.7/abc.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e11ac6f6549c804b2061376e6dae6e1f712e4f4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/abc.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/codecs.pyc b/googleAppEngine/scipy/env/lib/python2.7/codecs.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14858bcfba8149c96063164b9bc4fcc389e4d140 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/codecs.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/copy_reg.pyc b/googleAppEngine/scipy/env/lib/python2.7/copy_reg.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0377b17e0be3b36a864133e6b915359df9ac9f3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/copy_reg.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/genericpath.pyc b/googleAppEngine/scipy/env/lib/python2.7/genericpath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9fe699e51674a795c188a5b267f20020ff20183 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/genericpath.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/linecache.pyc b/googleAppEngine/scipy/env/lib/python2.7/linecache.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5926d72d92877a1b91921e89e883593a86818ad3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/linecache.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/ntpath.pyc b/googleAppEngine/scipy/env/lib/python2.7/ntpath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04bfccd137a68635efe0d9d50aea9f2ce54e9d45 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/ntpath.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/os.pyc b/googleAppEngine/scipy/env/lib/python2.7/os.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcb0bd3d1cc9b7d5a9385173713be764f09f5e11 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/os.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/posixpath.pyc b/googleAppEngine/scipy/env/lib/python2.7/posixpath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6229df37124c951f63ff23f322675f2006c958d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/posixpath.pyc differ diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/re.pyc b/googleAppEngine/scipy/env/lib/python2.7/re.pyc new file mode 100644 index 0000000000000000000000000000000000000000..992bd0319f2cc410ea6508940513a4f77ba8c0d6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/re.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/LICENSE.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..87ce152aafba76f0200eae513fd0541f4f767acd --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/LICENSE.txt @@ -0,0 +1,39 @@ +Copyright © 2014 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms of the software as +well as documentation, with or without modification, are permitted +provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +- Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +---- + +Click uses parts of optparse written by Gregory P. Ward and maintained +by the Python Software Foundation. This is limited to code in parser.py. + +Copyright © 2001-2006 Gregory P. Ward. All rights reserved. +Copyright © 2002-2006 Python Software Foundation. All rights reserved. 
diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..625bdaddbba169ae3b25b492a591eff6cc2e4013 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/METADATA @@ -0,0 +1,121 @@ +Metadata-Version: 2.1 +Name: Click +Version: 7.0 +Summary: Composable command line interface toolkit +Home-page: https://palletsprojects.com/p/click/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets Team +Maintainer-email: contact@palletsprojects.com +License: BSD +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Code, https://github.com/pallets/click +Project-URL: Issue tracker, https://github.com/pallets/click/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +\$ click\_ +========== + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install click + +Click supports Python 3.4 and newer, Python 2.7, and PyPy. + +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + +A Simple Example +---------------- + +What does it look like? Here is an example of a simple Click program: + +.. code-block:: python + + import click + + @click.command() + @click.option("--count", default=1, help="Number of greetings.") + @click.option("--name", prompt="Your name", + help="The person to greet.") + def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo("Hello, %s!" % name) + + if __name__ == '__main__': + hello() + +And what it looks like when run: + +.. code-block:: text + + $ python hello.py --count=3 + Your name: Click + Hello, Click! + Hello, Click! + Hello, Click! + + +Donate +------ + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. 
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +* Website: https://palletsprojects.com/p/click/ +* Documentation: https://click.palletsprojects.com/ +* License: `BSD `_ +* Releases: https://pypi.org/project/click/ +* Code: https://github.com/pallets/click +* Issue tracker: https://github.com/pallets/click/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/click + * Windows: https://ci.appveyor.com/project/pallets/click + +* Test coverage: https://codecov.io/gh/pallets/click + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0dac84328f7c3499f8c0be57e94e244d6dd0b48f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/RECORD @@ -0,0 +1,40 @@ +Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876 +Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516 +Click-7.0.dist-info/RECORD,, +Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789 +click/__init__.pyc,, +click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014 +click/_bashcomplete.pyc,, +click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399 +click/_compat.pyc,, +click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611 +click/_termui_impl.pyc,, +click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198 +click/_textwrap.pyc,, +click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364 +click/_unicodefun.pyc,, +click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965 +click/_winconsole.pyc,, +click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305 +click/core.pyc,, +click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226 +click/decorators.pyc,, +click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663 +click/exceptions.pyc,, +click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889 +click/formatting.pyc,, +click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514 +click/globals.pyc,, +click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510 +click/parser.pyc,, +click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207 +click/termui.pyc,, +click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062 +click/testing.pyc,, +click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287 +click/types.pyc,, +click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763 +click/utils.pyc,, diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1316c41d0706f2dbe6e8d73a809c31d67f02954a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: true +Tag: 
py2-none-any +Tag: py3-none-any + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..dca9a909647e3b066931de2909c2d1e65c78c995 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Click-7.0.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f9252f4527debb8ec5ae57df4ffd134cfd3c643 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright © 2010 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms of the software as +well as documentation, with or without modification, are permitted +provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c600e730a953ee3a109ededd74fb36650b601a69 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/METADATA @@ -0,0 +1,130 @@ +Metadata-Version: 2.1 +Name: Flask +Version: 1.0.2 +Summary: A simple framework for building complex web applications. 
+Home-page: https://www.palletsprojects.com/p/flask/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets team +Maintainer-email: contact@palletsprojects.com +License: BSD +Project-URL: Documentation, http://flask.pocoo.org/docs/ +Project-URL: Code, https://github.com/pallets/flask +Project-URL: Issue tracker, https://github.com/pallets/flask/issues +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Framework :: Flask +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application +Classifier: Topic :: Software Development :: Libraries :: Application Frameworks +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: dev +Provides-Extra: docs +Provides-Extra: dotenv +Requires-Dist: Werkzeug (>=0.14) +Requires-Dist: Jinja2 (>=2.10) +Requires-Dist: itsdangerous (>=0.24) +Requires-Dist: click (>=5.1) +Provides-Extra: dev +Requires-Dist: pytest (>=3); extra == 'dev' +Requires-Dist: coverage; extra == 'dev' +Requires-Dist: tox; extra == 'dev' +Requires-Dist: sphinx; extra == 'dev' +Requires-Dist: pallets-sphinx-themes; extra == 'dev' +Requires-Dist: sphinxcontrib-log-cabinet; extra == 'dev' +Provides-Extra: docs +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: pallets-sphinx-themes; extra == 'docs' +Requires-Dist: sphinxcontrib-log-cabinet; extra == 'docs' +Provides-Extra: dotenv +Requires-Dist: python-dotenv; extra == 'dotenv' + +Flask +===== + +Flask is a lightweight `WSGI`_ web application framework. It is designed +to make getting started quick and easy, with the ability to scale up to +complex applications. It began as a simple wrapper around `Werkzeug`_ +and `Jinja`_ and has become one of the most popular Python web +application frameworks. + +Flask offers suggestions, but doesn't enforce any dependencies or +project layout. It is up to the developer to choose the tools and +libraries they want to use. There are many extensions provided by the +community that make adding new functionality easy. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Flask + + +A Simple Example +---------------- + +.. code-block:: python + + from flask import Flask + + app = Flask(__name__) + + @app.route('/') + def hello(): + return 'Hello, World!' + +.. code-block:: text + + $ FLASK_APP=hello.py flask run + * Serving Flask app "hello" + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + + +Donate +------ + +The Pallets organization develops and supports Flask and the libraries +it uses. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. 
_please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20 + + +Links +----- + +* Website: https://www.palletsprojects.com/p/flask/ +* Documentation: http://flask.pocoo.org/docs/ +* License: `BSD `_ +* Releases: https://pypi.org/project/Flask/ +* Code: https://github.com/pallets/flask +* Issue tracker: https://github.com/pallets/flask/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/flask + * Windows: https://ci.appveyor.com/project/pallets/flask + +* Test coverage: https://codecov.io/gh/pallets/flask + +.. _WSGI: https://wsgi.readthedocs.io +.. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/ +.. _Jinja: https://www.palletsprojects.com/p/jinja/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..42bc7ac294ecc411fd80ab2404776659cd52ae41 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/RECORD @@ -0,0 +1,48 @@ +../../../bin/flask,sha256=96l5zVShoR5xhkJCakUhZD8arIC-TytHxzbMuChkz0U,278 +Flask-1.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Flask-1.0.2.dist-info/LICENSE.txt,sha256=ziEXA3AIuaiUn1qe4cd1XxCESWTYrk4TjN7Qb06J3l8,1575 +Flask-1.0.2.dist-info/METADATA,sha256=iA5tiNWzTtgCVe80aTZGNWsckj853fJyfvHs9U-WZRk,4182 +Flask-1.0.2.dist-info/RECORD,, +Flask-1.0.2.dist-info/WHEEL,sha256=J3CsTk7Mf2JNUyhImI-mjX-fmI4oDjyiXgWT4qgZiCE,110 +Flask-1.0.2.dist-info/entry_points.txt,sha256=gBLA1aKg0OYR8AhbAfg8lnburHtKcgJLDU52BBctN0k,42 +Flask-1.0.2.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6 +flask/__init__.py,sha256=qq8lK6QQbxJALf1igz7qsvUwOTAoKvFGfdLm7jPNsso,1673 +flask/__init__.pyc,, +flask/__main__.py,sha256=pgIXrHhxM5MAMvgzAqWpw_t6AXZ1zG38us4JRgJKtxk,291 +flask/__main__.pyc,, +flask/_compat.py,sha256=UDFGhosh6mOdNB-4evKPuneHum1OpcAlwTNJCRm0irQ,2892 +flask/_compat.pyc,, +flask/app.py,sha256=ahpe3T8w98rQd_Er5d7uDxK57S1nnqGQx3V3hirBovU,94147 +flask/app.pyc,, +flask/blueprints.py,sha256=Cyhl_x99tgwqEZPtNDJUFneAfVJxWfEU4bQA7zWS6VU,18331 +flask/blueprints.pyc,, +flask/cli.py,sha256=30QYAO10Do9LbZYCLgfI_xhKjASdLopL8wKKVUGS2oA,29442 +flask/cli.pyc,, +flask/config.py,sha256=kznUhj4DLYxsTF_4kfDG8GEHto1oZG_kqblyrLFtpqQ,9951 +flask/config.pyc,, +flask/ctx.py,sha256=leFzS9fzmo0uaLCdxpHc5_iiJZ1H0X_Ig4yPCOvT--g,16224 +flask/ctx.pyc,, +flask/debughelpers.py,sha256=1ceC-UyqZTd4KsJkf0OObHPsVt5R3T6vnmYhiWBjV-w,6479 +flask/debughelpers.pyc,, +flask/globals.py,sha256=pGg72QW_-4xUfsI33I5L_y76c21AeqfSqXDcbd8wvXU,1649 +flask/globals.pyc,, +flask/helpers.py,sha256=YCl8D1plTO1evEYP4KIgaY3H8Izww5j4EdgRJ89oHTw,40106 +flask/helpers.pyc,, +flask/json/__init__.py,sha256=Ns1Hj805XIxuBMh2z0dYnMVfb_KUgLzDmP3WoUYaPhw,10729 +flask/json/__init__.pyc,, +flask/json/tag.py,sha256=9ehzrmt5k7hxf7ZEK0NOs3swvQyU9fWNe-pnYe69N60,8223 +flask/json/tag.pyc,, +flask/logging.py,sha256=qV9h0vt7NIRkKM9OHDWndzO61E5CeBMlqPJyTt-W2Wc,2231 +flask/logging.pyc,, +flask/sessions.py,sha256=2XHV4ASREhSEZ8bsPQW6pNVNuFtbR-04BzfKg0AfvHo,14452 +flask/sessions.pyc,, +flask/signals.py,sha256=BGQbVyCYXnzKK2DVCzppKFyWN1qmrtW1QMAYUs-1Nr8,2211 +flask/signals.pyc,, +flask/templating.py,sha256=FDfWMbpgpC3qObW8GGXRAVrkHFF8K4CHOJymB1wvULI,4914 +flask/templating.pyc,, +flask/testing.py,sha256=XD3gWNvLUV8dqVHwKd9tZzsj81fSHtjOphQ1wTNtlMs,9379 +flask/testing.pyc,, 
+flask/views.py,sha256=Wy-_WkUVtCfE2zCXYeJehNgHuEtviE4v3HYfJ--MpbY,5733 +flask/views.pyc,, +flask/wrappers.py,sha256=1Z9hF5-hXQajn_58XITQFRY8efv3Vy3uZ0avBfZu6XI,7511 +flask/wrappers.pyc,, diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..f21b51cd8af46f70e8fc3a7bd678e2db8baf104c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..1eb025200e62eb7e4f4c5d27a8498df07b84c5f2 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +flask = flask.cli:main + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..7e1060246fd6746a14204539a72e199a25469a05 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Flask-1.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +flask diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000000000000000000000000000000000..1594da5ce5fd98bdb9a05462f6f7252e28c74644 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst @@ -0,0 +1,37 @@ + +Jinja2 +~~~~~~ + +Jinja2 is a template engine written in pure Python. It provides a +`Django`_ inspired non-XML syntax but supports inline expressions and +an optional `sandboxed`_ environment. + +Nutshell +-------- + +Here a small example of a Jinja template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} + + {% endblock %} + +Philosophy +---------- + +Application logic is for the controller but don't try to make the life +for the template designer too hard by giving him too few functionality. + +For more informations visit the new `Jinja2 webpage`_ and `documentation`_. + +.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security) +.. _Django: https://www.djangoproject.com/ +.. _Jinja2 webpage: http://jinja.pocoo.org/ +.. 
_documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..10145a264342b7888ec6accfedc4f2808fb67a0e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..40f2b46b40668ac2e7641811be66cd1f7f533939 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/METADATA @@ -0,0 +1,68 @@ +Metadata-Version: 2.0 +Name: Jinja2 +Version: 2.10 +Summary: A small but fast and easy to use stand-alone template engine written in pure python. 
+Home-page: http://jinja.pocoo.org/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description-Content-Type: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Dist: MarkupSafe (>=0.23) +Provides-Extra: i18n +Requires-Dist: Babel (>=0.8); extra == 'i18n' + + +Jinja2 +~~~~~~ + +Jinja2 is a template engine written in pure Python. It provides a +`Django`_ inspired non-XML syntax but supports inline expressions and +an optional `sandboxed`_ environment. + +Nutshell +-------- + +Here a small example of a Jinja template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} + + {% endblock %} + +Philosophy +---------- + +Application logic is for the controller but don't try to make the life +for the template designer too hard by giving him too few functionality. + +For more informations visit the new `Jinja2 webpage`_ and `documentation`_. + +.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security) +.. _Django: https://www.djangoproject.com/ +.. _Jinja2 webpage: http://jinja.pocoo.org/ +.. 
_documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6151967b48dd07703c271157b0a3e2ee96c220ab --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD @@ -0,0 +1,61 @@ +Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978 +Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554 +Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258 +Jinja2-2.10.dist-info/RECORD,, +Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 +Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 +Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473 +Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614 +jinja2/__init__.pyc,, +jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596 +jinja2/_compat.pyc,, +jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726 +jinja2/_identifier.pyc,, +jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144 +jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878 +jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794 +jinja2/bccache.pyc,, +jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386 +jinja2/compiler.pyc,, +jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626 +jinja2/constants.pyc,, +jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045 +jinja2/debug.pyc,, +jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400 +jinja2/defaults.pyc,, +jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849 +jinja2/environment.pyc,, +jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428 +jinja2/exceptions.pyc,, +jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500 +jinja2/ext.pyc,, +jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528 +jinja2/filters.pyc,, +jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197 +jinja2/idtracking.pyc,, +jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559 +jinja2/lexer.pyc,, +jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382 +jinja2/loaders.pyc,, +jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340 +jinja2/meta.pyc,, +jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308 +jinja2/nativetypes.pyc,, +jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853 +jinja2/nodes.pyc,, +jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722 +jinja2/optimizer.pyc,, +jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875 +jinja2/parser.pyc,, +jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755 +jinja2/runtime.pyc,, +jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708 +jinja2/sandbox.pyc,, 
+jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237 +jinja2/tests.pyc,, +jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629 +jinja2/utils.pyc,, +jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316 +jinja2/visitor.pyc,, diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7332a419cda6903b61439f3bac93492b0747e6e7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..32e6b7530284307358fa869cf232318221f791fd --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/entry_points.txt @@ -0,0 +1,4 @@ + + [babel.extractors] + jinja2 = jinja2.ext:babel_extract[i18n] + \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7f5dc3879d9320ee95e12f257f83222dd331a304 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"} \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Jinja2-2.10.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5d2693890dddc34129973f5613afd88767213b24 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/LICENSE @@ -0,0 +1,33 @@ +Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS +for more details. + +Some rights reserved. + +Redistribution and use in source and binary forms of the software as well +as documentation, with or without modification, are permitted provided +that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT +NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..25a3ad162a11e5d56a314b17e5df14133d027f9e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/METADATA @@ -0,0 +1,135 @@ +Metadata-Version: 2.1 +Name: MarkupSafe +Version: 1.0 +Summary: Implements a XML/HTML/XHTML Markup safe string for Python +Home-page: http://github.com/pallets/markupsafe +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML + +MarkupSafe +========== + +Implements a unicode subclass that supports HTML strings: + +.. code-block:: python + + >>> from markupsafe import Markup, escape + >>> escape("") + Markup(u'<script>alert(document.cookie);</script>') + >>> tmpl = Markup("%s") + >>> tmpl % "Peter > Lustig" + Markup(u'Peter > Lustig') + +If you want to make an object unicode that is not yet unicode +but don't want to lose the taint information, you can use the +``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which +is a different name for the same function). + +.. code-block:: python + + >>> from markupsafe import soft_unicode + >>> soft_unicode(42) + u'42' + >>> soft_unicode(Markup('foo')) + Markup(u'foo') + +HTML Representations +-------------------- + +Objects can customize their HTML markup equivalent by overriding +the ``__html__`` function: + +.. code-block:: python + + >>> class Foo(object): + ... def __html__(self): + ... return 'Nice' + ... + >>> escape(Foo()) + Markup(u'Nice') + >>> Markup(Foo()) + Markup(u'Nice') + +Silent Escapes +-------------- + +Since MarkupSafe 0.10 there is now also a separate escape function +called ``escape_silent`` that returns an empty string for ``None`` for +consistency with other systems that return empty strings for ``None`` +when escaping (for instance Pylons' webhelpers). + +If you also want to use this for the escape method of the Markup +object, you can create your own subclass that does that: + +.. code-block:: python + + from markupsafe import Markup, escape_silent as escape + + class SilentMarkup(Markup): + __slots__ = () + + @classmethod + def escape(cls, s): + return cls(escape(s)) + +New-Style String Formatting +--------------------------- + +Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and +3.x are now fully supported. Previously the escape behavior of those +functions was spotty at best. The new implementations operates under the +following algorithm: + +1. if an object has an ``__html_format__`` method it is called as + replacement for ``__format__`` with the format specifier. It either + has to return a string or markup object. +2. if an object has an ``__html__`` method it is called. +3. otherwise the default format system of Python kicks in and the result + is HTML escaped. 
+ +Here is how you can implement your own formatting: + +.. code-block:: python + + class User(object): + + def __init__(self, id, username): + self.id = id + self.username = username + + def __html_format__(self, format_spec): + if format_spec == 'link': + return Markup('{1}').format( + self.id, + self.__html__(), + ) + elif format_spec: + raise ValueError('Invalid format spec') + return self.__html__() + + def __html__(self): + return Markup('{0}').format(self.username) + +And to format that user: + +.. code-block:: python + + >>> user = User(1, 'foo') + >>> Markup('

User: {0:link}').format(user) + Markup(u'

User: foo') + +Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher. + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..af860f3b74ee0abfb6f9227cf648f5a1dbcac58d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/RECORD @@ -0,0 +1,16 @@ +MarkupSafe-1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +MarkupSafe-1.0.dist-info/LICENSE,sha256=C76IIo_WPSDsCX9k5Y1aCkZRI64TkUChjUBsYLSIJLU,1582 +MarkupSafe-1.0.dist-info/METADATA,sha256=RTBfxOEfHqiY9goR2QvR2sG0-pRm52r0QWcGi_pUYCQ,4182 +MarkupSafe-1.0.dist-info/RECORD,, +MarkupSafe-1.0.dist-info/WHEEL,sha256=-Crjs1WwpTj5CCeFg4GKXWPpZsiCLs9UbQGH1WBfXpw,105 +MarkupSafe-1.0.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 +markupsafe/__init__.py,sha256=xtkRdxhzJzgp65wUo1D4DjnazxHU88pPldaAuDekBeY,10697 +markupsafe/__init__.pyc,, +markupsafe/_compat.py,sha256=r1HE0CpcAZeb-AiTV9wITR91PeLHn0CzZ_XHkYoozpI,565 +markupsafe/_compat.pyc,, +markupsafe/_constants.py,sha256=U_xybFQsyXKCgHSfranJnFzo-z9nn9fuBeSk243sE5Q,4795 +markupsafe/_constants.pyc,, +markupsafe/_native.py,sha256=E2Un1ysOf-w45d18YCj8UelT5UP7Vt__IuFPYJ7YRIs,1187 +markupsafe/_native.pyc,, +markupsafe/_speedups.c,sha256=B6Mf6Fn33WqkagfwY7q5ZBSm_vJoHDYxDB0Jp_DP7Jw,5936 +markupsafe/_speedups.so,sha256=45IyeMuudiT9zUlxOrXabD_Xz-GRanw8Wk4pJV7-pbc,34864 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c124558c3005dbae0f043491744d4bef502147d6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.32.2) +Root-Is-Purelib: false +Tag: cp27-cp27mu-linux_x86_64 + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/MarkupSafe-1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libfreetype-6ed94974.so.6.16.1 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libfreetype-6ed94974.so.6.16.1 new file mode 100755 index 0000000000000000000000000000000000000000..863b3810411e495d4845f5664ff07a6bc2b1e15c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libfreetype-6ed94974.so.6.16.1 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 new file mode 100755 index 0000000000000000000000000000000000000000..835b57be5527bfce56e507d8ae10bcd248aaaed2 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblcms2-a6801db4.so.2.0.8 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblcms2-a6801db4.so.2.0.8 new file mode 100755 index 0000000000000000000000000000000000000000..6f10af240e4d9df5cd24df327db58a1ffa8115e3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblcms2-a6801db4.so.2.0.8 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblzma-90de1f11.so.5.2.2 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblzma-90de1f11.so.5.2.2 new file mode 100755 index 0000000000000000000000000000000000000000..0e22b2e0c9cb5a43b0754dda618824229a460323 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/liblzma-90de1f11.so.5.2.2 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 new file mode 100755 index 0000000000000000000000000000000000000000..c31cea41cf8aa4819f72b5935ee897445b25d7ec Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libopenjp2-e366d6b0.so.2.1.0 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libpng16-8793a1b2.so.16.32.0 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libpng16-8793a1b2.so.16.32.0 new file mode 100755 index 0000000000000000000000000000000000000000..219f41b321dfda7e4b08a73043de7244d6976e3e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libpng16-8793a1b2.so.16.32.0 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libtiff-8a6d997d.so.5.3.0 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libtiff-8a6d997d.so.5.3.0 new file mode 100755 index 0000000000000000000000000000000000000000..c2ac40fa11b9d42bb39259a7e5fc53c012d0d94a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libtiff-8a6d997d.so.5.3.0 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 new file mode 100755 index 0000000000000000000000000000000000000000..b2f0763847a525fa3ba460237cb01d27660da669 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebp-8ccd29fd.so.7.0.2 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 new file mode 100755 index 0000000000000000000000000000000000000000..66adccb711f252519610bb16457096bee256ef6d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 new file mode 100755 index 0000000000000000000000000000000000000000..2045a77aafad5bd28d1f13cfaa454417dc72c78a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libwebpmux-1c63fe99.so.3.0.2 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libz-a147dcb0.so.1.2.3 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libz-a147dcb0.so.1.2.3 new file mode 100755 index 0000000000000000000000000000000000000000..c123f89555bde14ea84f765469b0424b12dd0af9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/.libs/libz-a147dcb0.so.1.2.3 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..c8bc604610af4894921fb5e4f723fec5b781b6ee --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.py @@ -0,0 +1,133 @@ +# +# The Python Imaging Library +# $Id$ +# +# bitmap distribution font (bdf) file parser +# +# history: +# 1996-05-16 fl created (as bdf2pil) +# 1997-08-25 fl converted to FontFile driver +# 2001-05-25 fl removed bogus __init__ call +# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev) +# 2003-04-22 fl more robustification (from Graham Dumpleton) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +from . import Image, FontFile + + +# -------------------------------------------------------------------- +# parse X Bitmap Distribution Format (BDF) +# -------------------------------------------------------------------- + +bdf_slant = { + "R": "Roman", + "I": "Italic", + "O": "Oblique", + "RI": "Reverse Italic", + "RO": "Reverse Oblique", + "OT": "Other" +} + +bdf_spacing = { + "P": "Proportional", + "M": "Monospaced", + "C": "Cell" +} + + +def bdf_char(f): + # skip to STARTCHAR + while True: + s = f.readline() + if not s: + return None + if s[:9] == b"STARTCHAR": + break + id = s[9:].strip().decode('ascii') + + # load symbol properties + props = {} + while True: + s = f.readline() + if not s or s[:6] == b"BITMAP": + break + i = s.find(b" ") + props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii') + + # load bitmap + bitmap = [] + while True: + s = f.readline() + if not s or s[:7] == b"ENDCHAR": + break + bitmap.append(s[:-1]) + bitmap = b"".join(bitmap) + + [x, y, l, d] = [int(p) for p in props["BBX"].split()] + [dx, dy] = [int(p) for p in props["DWIDTH"].split()] + + bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y) + + try: + im = Image.frombytes("1", (x, y), bitmap, "hex", "1") + except ValueError: + # deal with zero-width characters + im = Image.new("1", (x, y)) + + return id, int(props["ENCODING"]), bbox, im + + +## +# Font file plugin for the X11 BDF format. 
+ +class BdfFontFile(FontFile.FontFile): + + def __init__(self, fp): + + FontFile.FontFile.__init__(self) + + s = fp.readline() + if s[:13] != b"STARTFONT 2.1": + raise SyntaxError("not a valid BDF file") + + props = {} + comments = [] + + while True: + s = fp.readline() + if not s or s[:13] == b"ENDPROPERTIES": + break + i = s.find(b" ") + props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii') + if s[:i] in [b"COMMENT", b"COPYRIGHT"]: + if s.find(b"LogicalFontDescription") < 0: + comments.append(s[i+1:-1].decode('ascii')) + + # font = props["FONT"].split("-") + + # font[4] = bdf_slant[font[4].upper()] + # font[11] = bdf_spacing[font[11].upper()] + + # ascent = int(props["FONT_ASCENT"]) + # descent = int(props["FONT_DESCENT"]) + + # fontname = ";".join(font[1:]) + + # print("#", fontname) + # for i in comments: + # print("#", i) + + while True: + c = bdf_char(fp) + if not c: + break + id, ch, (xy, dst, src), im = c + if 0 <= ch < len(self.glyph): + self.glyph[ch] = xy, dst, src, im diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ab93272d946e3a4d9d1a327fccbeb57e99ee21c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BdfFontFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BlpImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BlpImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..9b1a99ae1b336f95b32546164d5649b31d98be20 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BlpImagePlugin.py @@ -0,0 +1,435 @@ +""" +Blizzard Mipmap Format (.blp) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +BLP1 files, used mostly in Warcraft III, are not fully supported. +All types of BLP2 files used in World of Warcraft are supported. + +The BLP file structure consists of a header, up to 16 mipmaps of the +texture + +Texture sizes must be powers of two, though the two dimensions do +not have to be equal; 512x256 is valid, but 512x200 is not. +The first mipmap (mipmap #0) is the full size image; each subsequent +mipmap halves both dimensions. The final mipmap should be 1x1. + +BLP files come in many different flavours: +* JPEG-compressed (type == 0) - only supported for BLP1. +* RAW images (type == 1, encoding == 1). Each mipmap is stored as an + array of 8-bit values, one per pixel, left to right, top to bottom. + Each value is an index to the palette. +* DXT-compressed (type == 1, encoding == 2): +- DXT1 compression is used if alpha_encoding == 0. + - An additional alpha bit is used if alpha_depth == 1. + - DXT3 compression is used if alpha_encoding == 1. + - DXT5 compression is used if alpha_encoding == 7. +""" + +import struct +from io import BytesIO + +from . import Image, ImageFile + + +BLP_FORMAT_JPEG = 0 + +BLP_ENCODING_UNCOMPRESSED = 1 +BLP_ENCODING_DXT = 2 +BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3 + +BLP_ALPHA_ENCODING_DXT1 = 0 +BLP_ALPHA_ENCODING_DXT3 = 1 +BLP_ALPHA_ENCODING_DXT5 = 7 + + +def unpack_565(i): + return ( + ((i >> 11) & 0x1f) << 3, + ((i >> 5) & 0x3f) << 2, + (i & 0x1f) << 3 + ) + + +def decode_dxt1(data, alpha=False): + """ + input: one "row" of data (i.e. 
will produce 4*width pixels) + """ + + blocks = len(data) // 8 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + # Decode next 8-byte block. + idx = block * 8 + color0, color1, bits = struct.unpack_from("> 2 + + a = 0xFF + if control == 0: + r, g, b = r0, g0, b0 + elif control == 1: + r, g, b = r1, g1, b1 + elif control == 2: + if color0 > color1: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + else: + r = (r0 + r1) // 2 + g = (g0 + g1) // 2 + b = (b0 + b1) // 2 + elif control == 3: + if color0 > color1: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + else: + r, g, b, a = 0, 0, 0, 0 + + if alpha: + ret[j].extend([r, g, b, a]) + else: + ret[j].extend([r, g, b]) + + return ret + + +def decode_dxt3(data): + """ + input: one "row" of data (i.e. will produce 4*width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx:idx + 16] + # Decode next 16-byte block. + bits = struct.unpack_from("<8B", block) + color0, color1 = struct.unpack_from(">= 4 + else: + high = True + a &= 0xf + a *= 17 # We get a value between 0 and 15 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +def decode_dxt5(data): + """ + input: one "row" of data (i.e. will produce 4 * width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx:idx + 16] + # Decode next 16-byte block. 
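+        # Layout note (standard S3TC/BC3 block layout, consistent with the code
+        # below): the first 8 bytes of the block hold the alpha data -- endpoint
+        # alphas a0 and a1 followed by sixteen 3-bit interpolation codes -- and
+        # the last 8 bytes hold a DXT1-style colour block: two RGB565 endpoints
+        # plus sixteen 2-bit colour codes.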
+ a0, a1 = struct.unpack_from("> alphacode_index) & 0x07 + elif alphacode_index == 15: + alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06) + else: # alphacode_index >= 18 and alphacode_index <= 45 + alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07 + + if alphacode == 0: + a = a0 + elif alphacode == 1: + a = a1 + elif a0 > a1: + a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7 + elif alphacode == 6: + a = 0 + elif alphacode == 7: + a = 255 + else: + a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +class BLPFormatError(NotImplementedError): + pass + + +class BlpImageFile(ImageFile.ImageFile): + """ + Blizzard Mipmap Format + """ + format = "BLP" + format_description = "Blizzard Mipmap Format" + + def _open(self): + self.magic = self.fp.read(4) + self._read_blp_header() + + if self.magic == b"BLP1": + decoder = "BLP1" + self.mode = "RGB" + elif self.magic == b"BLP2": + decoder = "BLP2" + self.mode = "RGBA" if self._blp_alpha_depth else "RGB" + else: + raise BLPFormatError("Bad BLP magic %r" % (self.magic)) + + self.tile = [ + (decoder, (0, 0) + self.size, 0, (self.mode, 0, 1)) + ] + + def _read_blp_header(self): + self._blp_compression, = struct.unpack(" mode, rawmode + 1: ("P", "P;1"), + 4: ("P", "P;4"), + 8: ("P", "P"), + 16: ("RGB", "BGR;15"), + 24: ("RGB", "BGR"), + 32: ("RGB", "BGRX"), +} + + +def _accept(prefix): + return prefix[:2] == b"BM" + + +# ============================================================================== +# Image plugin for the Windows BMP format. 
+# ============================================================================== +class BmpImageFile(ImageFile.ImageFile): + """ Image plugin for the Windows Bitmap format (BMP) """ + + # -------------------------------------------------------------- Description + format_description = "Windows Bitmap" + format = "BMP" + # --------------------------------------------------- BMP Compression values + COMPRESSIONS = {'RAW': 0, 'RLE8': 1, 'RLE4': 2, 'BITFIELDS': 3, 'JPEG': 4, 'PNG': 5} + RAW, RLE8, RLE4, BITFIELDS, JPEG, PNG = 0, 1, 2, 3, 4, 5 + + def _bitmap(self, header=0, offset=0): + """ Read relevant info about the BMP """ + read, seek = self.fp.read, self.fp.seek + if header: + seek(header) + file_info = {} + file_info['header_size'] = i32(read(4)) # read bmp header size @offset 14 (this is part of the header size) + file_info['direction'] = -1 + # --------------------- If requested, read header at a specific position + header_data = ImageFile._safe_read(self.fp, file_info['header_size'] - 4) # read the rest of the bmp header, without its size + # --------------------------------------------------- IBM OS/2 Bitmap v1 + # ------ This format has different offsets because of width/height types + if file_info['header_size'] == 12: + file_info['width'] = i16(header_data[0:2]) + file_info['height'] = i16(header_data[2:4]) + file_info['planes'] = i16(header_data[4:6]) + file_info['bits'] = i16(header_data[6:8]) + file_info['compression'] = self.RAW + file_info['palette_padding'] = 3 + # ---------------------------------------------- Windows Bitmap v2 to v5 + elif file_info['header_size'] in (40, 64, 108, 124): # v3, OS/2 v2, v4, v5 + if file_info['header_size'] >= 40: # v3 and OS/2 + file_info['y_flip'] = i8(header_data[7]) == 0xff + file_info['direction'] = 1 if file_info['y_flip'] else -1 + file_info['width'] = i32(header_data[0:4]) + file_info['height'] = i32(header_data[4:8]) if not file_info['y_flip'] else 2**32 - i32(header_data[4:8]) + file_info['planes'] = i16(header_data[8:10]) + file_info['bits'] = i16(header_data[10:12]) + file_info['compression'] = i32(header_data[12:16]) + file_info['data_size'] = i32(header_data[16:20]) # byte size of pixel data + file_info['pixels_per_meter'] = (i32(header_data[20:24]), i32(header_data[24:28])) + file_info['colors'] = i32(header_data[28:32]) + file_info['palette_padding'] = 4 + self.info["dpi"] = tuple( + map(lambda x: int(math.ceil(x / 39.3701)), + file_info['pixels_per_meter'])) + if file_info['compression'] == self.BITFIELDS: + if len(header_data) >= 52: + for idx, mask in enumerate(['r_mask', 'g_mask', 'b_mask', 'a_mask']): + file_info[mask] = i32(header_data[36+idx*4:40+idx*4]) + else: + # 40 byte headers only have the three components in the bitfields masks, + # ref: https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx + # See also https://github.com/python-pillow/Pillow/issues/1293 + # There is a 4th component in the RGBQuad, in the alpha location, but it + # is listed as a reserved component, and it is not generally an alpha channel + file_info['a_mask'] = 0x0 + for mask in ['r_mask', 'g_mask', 'b_mask']: + file_info[mask] = i32(read(4)) + file_info['rgb_mask'] = (file_info['r_mask'], file_info['g_mask'], file_info['b_mask']) + file_info['rgba_mask'] = (file_info['r_mask'], file_info['g_mask'], file_info['b_mask'], file_info['a_mask']) + else: + raise IOError("Unsupported BMP header type (%d)" % file_info['header_size']) + # ------------------ Special case : header is reported 40, which + # 
---------------------- is shorter than real size for bpp >= 16 + self.size = file_info['width'], file_info['height'] + # -------- If color count was not found in the header, compute from bits + file_info['colors'] = file_info['colors'] if file_info.get('colors', 0) else (1 << file_info['bits']) + # -------------------------------- Check abnormal values for DOS attacks + if file_info['width'] * file_info['height'] > 2**31: + raise IOError("Unsupported BMP Size: (%dx%d)" % self.size) + # ----------------------- Check bit depth for unusual unsupported values + self.mode, raw_mode = BIT2MODE.get(file_info['bits'], (None, None)) + if self.mode is None: + raise IOError("Unsupported BMP pixel depth (%d)" % file_info['bits']) + # ----------------- Process BMP with Bitfields compression (not palette) + if file_info['compression'] == self.BITFIELDS: + SUPPORTED = { + 32: [(0xff0000, 0xff00, 0xff, 0x0), (0xff0000, 0xff00, 0xff, 0xff000000), (0x0, 0x0, 0x0, 0x0), (0xff000000, 0xff0000, 0xff00, 0x0)], + 24: [(0xff0000, 0xff00, 0xff)], + 16: [(0xf800, 0x7e0, 0x1f), (0x7c00, 0x3e0, 0x1f)] + } + MASK_MODES = { + (32, (0xff0000, 0xff00, 0xff, 0x0)): "BGRX", + (32, (0xff000000, 0xff0000, 0xff00, 0x0)): "XBGR", + (32, (0xff0000, 0xff00, 0xff, 0xff000000)): "BGRA", + (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", + (24, (0xff0000, 0xff00, 0xff)): "BGR", + (16, (0xf800, 0x7e0, 0x1f)): "BGR;16", + (16, (0x7c00, 0x3e0, 0x1f)): "BGR;15" + } + if file_info['bits'] in SUPPORTED: + if file_info['bits'] == 32 and file_info['rgba_mask'] in SUPPORTED[file_info['bits']]: + raw_mode = MASK_MODES[(file_info['bits'], file_info['rgba_mask'])] + self.mode = "RGBA" if raw_mode in ("BGRA",) else self.mode + elif file_info['bits'] in (24, 16) and file_info['rgb_mask'] in SUPPORTED[file_info['bits']]: + raw_mode = MASK_MODES[(file_info['bits'], file_info['rgb_mask'])] + else: + raise IOError("Unsupported BMP bitfields layout") + else: + raise IOError("Unsupported BMP bitfields layout") + elif file_info['compression'] == self.RAW: + if file_info['bits'] == 32 and header == 22: # 32-bit .cur offset + raw_mode, self.mode = "BGRA", "RGBA" + else: + raise IOError("Unsupported BMP compression (%d)" % file_info['compression']) + # ---------------- Once the header is processed, process the palette/LUT + if self.mode == "P": # Paletted for 1, 4 and 8 bit images + # ----------------------------------------------------- 1-bit images + if not (0 < file_info['colors'] <= 65536): + raise IOError("Unsupported BMP Palette size (%d)" % file_info['colors']) + else: + padding = file_info['palette_padding'] + palette = read(padding * file_info['colors']) + greyscale = True + indices = (0, 255) if file_info['colors'] == 2 else list(range(file_info['colors'])) + # ------------------ Check if greyscale and ignore palette if so + for ind, val in enumerate(indices): + rgb = palette[ind*padding:ind*padding + 3] + if rgb != o8(val) * 3: + greyscale = False + # -------- If all colors are grey, white or black, ditch palette + if greyscale: + self.mode = "1" if file_info['colors'] == 2 else "L" + raw_mode = self.mode + else: + self.mode = "P" + self.palette = ImagePalette.raw("BGRX" if padding == 4 else "BGR", palette) + + # ----------------------------- Finally set the tile data for the plugin + self.info['compression'] = file_info['compression'] + self.tile = [('raw', (0, 0, file_info['width'], file_info['height']), offset or self.fp.tell(), + (raw_mode, ((file_info['width'] * file_info['bits'] + 31) >> 3) & (~3), file_info['direction']) + )] + + def _open(self): + 
""" Open file, check magic number and read header """ + # read 14 bytes: magic number, filesize, reserved, header final offset + head_data = self.fp.read(14) + # choke if the file does not have the required magic bytes + if head_data[0:2] != b"BM": + raise SyntaxError("Not a BMP file") + # read the start position of the BMP image data (u32) + offset = i32(head_data[10:14]) + # load bitmap information (offset=raster info) + self._bitmap(offset=offset) + + +# ============================================================================== +# Image plugin for the DIB format (BMP alias) +# ============================================================================== +class DibImageFile(BmpImageFile): + + format = "DIB" + format_description = "Windows Bitmap" + + def _open(self): + self._bitmap() + +# +# -------------------------------------------------------------------- +# Write BMP file + + +SAVE = { + "1": ("1", 1, 2), + "L": ("L", 8, 256), + "P": ("P", 8, 256), + "RGB": ("BGR", 24, 0), + "RGBA": ("BGRA", 32, 0), +} + + +def _save(im, fp, filename): + try: + rawmode, bits, colors = SAVE[im.mode] + except KeyError: + raise IOError("cannot write mode %s as BMP" % im.mode) + + info = im.encoderinfo + + dpi = info.get("dpi", (96, 96)) + + # 1 meter == 39.3701 inches + ppm = tuple(map(lambda x: int(x * 39.3701), dpi)) + + stride = ((im.size[0]*bits+7)//8+3) & (~3) + header = 40 # or 64 for OS/2 version 2 + offset = 14 + header + colors * 4 + image = stride * im.size[1] + + # bitmap header + fp.write(b"BM" + # file type (magic) + o32(offset+image) + # file size + o32(0) + # reserved + o32(offset)) # image data offset + + # bitmap info header + fp.write(o32(header) + # info header size + o32(im.size[0]) + # width + o32(im.size[1]) + # height + o16(1) + # planes + o16(bits) + # depth + o32(0) + # compression (0=uncompressed) + o32(image) + # size of bitmap + o32(ppm[0]) + o32(ppm[1]) + # resolution + o32(colors) + # colors used + o32(colors)) # colors important + + fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) + + if im.mode == "1": + for i in (0, 255): + fp.write(o8(i) * 4) + elif im.mode == "L": + for i in range(256): + fp.write(o8(i) * 4) + elif im.mode == "P": + fp.write(im.im.getpalette("RGB", "BGRX")) + + ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, + (rawmode, stride, -1))]) + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(BmpImageFile.format, BmpImageFile, _accept) +Image.register_save(BmpImageFile.format, _save) + +Image.register_extension(BmpImageFile.format, ".bmp") + +Image.register_mime(BmpImageFile.format, "image/bmp") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BmpImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BmpImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fc37fbb55246bc49476d683e101802951b4e8ca Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BmpImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..16d83c74def2a2797680f72345162e4799d131f7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.py @@ -0,0 +1,72 @@ +# +# The Python Imaging Library +# $Id$ +# +# BUFR stub adapter +# +# Copyright (c) 1996-2003 by 
Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific BUFR image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + +def _accept(prefix): + return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC" + + +class BufrStubImageFile(ImageFile.StubImageFile): + + format = "BUFR" + format_description = "BUFR" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(4)): + raise SyntaxError("Not a BUFR file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self.size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise IOError("BUFR save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept) +Image.register_save(BufrStubImageFile.format, _save) + +Image.register_extension(BufrStubImageFile.format, ".bufr") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee8316e722694657d3d896cddbab922fce76997 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/BufrStubImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.py new file mode 100644 index 0000000000000000000000000000000000000000..496ed68263d4dd5ffb28b8d8cbbcc8ad34ffa976 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.py @@ -0,0 +1,116 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a class to read from a container file +# +# History: +# 1995-06-18 fl Created +# 1995-09-07 fl Added readline(), readlines() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +## +# A file object that provides read access to a part of an existing +# file (for example a TAR file). + + +class ContainerIO(object): + + def __init__(self, file, offset, length): + """ + Create file object. + + :param file: Existing file. + :param offset: Start of region, in bytes. + :param length: Size of region, in bytes. + """ + self.fh = file + self.pos = 0 + self.offset = offset + self.length = length + self.fh.seek(offset) + + ## + # Always false. + + def isatty(self): + return 0 + + def seek(self, offset, mode=0): + """ + Move file pointer. + + :param offset: Offset in bytes. + :param mode: Starting position. Use 0 for beginning of region, 1 + for current offset, and 2 for end of region. You cannot move + the pointer outside the defined region. + """ + if mode == 1: + self.pos = self.pos + offset + elif mode == 2: + self.pos = self.length + offset + else: + self.pos = offset + # clamp + self.pos = max(0, min(self.pos, self.length)) + self.fh.seek(self.offset + self.pos) + + def tell(self): + """ + Get current file pointer. 
+ + :returns: Offset from start of region, in bytes. + """ + return self.pos + + def read(self, n=0): + """ + Read data. + + :param n: Number of bytes to read. If omitted or zero, + read until end of region. + :returns: An 8-bit string. + """ + if n: + n = min(n, self.length - self.pos) + else: + n = self.length - self.pos + if not n: # EOF + return "" + self.pos = self.pos + n + return self.fh.read(n) + + def readline(self): + """ + Read a line of text. + + :returns: An 8-bit string. + """ + s = "" + while True: + c = self.read(1) + if not c: + break + s = s + c + if c == "\n": + break + return s + + def readlines(self): + """ + Read multiple lines of text. + + :returns: A list of 8-bit strings. + """ + l = [] + while True: + s = self.readline() + if not s: + break + l.append(s) + return l diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.pyc new file mode 100644 index 0000000000000000000000000000000000000000..148b19b7c21348647d9cb3f3ff8d1183ae3e4e25 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ContainerIO.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..e4257cd5a47adaf1095b9a7bdbbdd6c220309562 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.py @@ -0,0 +1,86 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Cursor support for PIL +# +# notes: +# uses BmpImagePlugin.py to read the bitmap data. +# +# history: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +from . import Image, BmpImagePlugin +from ._binary import i8, i16le as i16, i32le as i32 + +__version__ = "0.1" + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:4] == b"\0\0\2\0" + + +## +# Image plugin for Windows Cursor files. 
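A brief usage sketch (illustrative only, not part of the vendored file; the cursor filename is hypothetical). The plugin registers an open hook keyed on the b"\0\0\2\0" magic, picks the largest cursor image in the file, and exposes it like any other image; no CUR writer is registered, so converting out is the usual workflow:

from PIL import Image

im = Image.open("pointer.cur")       # dispatched to CurImageFile by its magic bytes
print(im.format, im.size, im.mode)   # "CUR", with the height patched to half the bitmap
im.save("pointer.png")               # convert the embedded bitmap to another format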
+ +class CurImageFile(BmpImagePlugin.BmpImageFile): + + format = "CUR" + format_description = "Windows Cursor" + + def _open(self): + + offset = self.fp.tell() + + # check magic + s = self.fp.read(6) + if not _accept(s): + raise SyntaxError("not a CUR file") + + # pick the largest cursor in the file + m = b"" + for i in range(i16(s[4:])): + s = self.fp.read(16) + if not m: + m = s + elif i8(s[0]) > i8(m[0]) and i8(s[1]) > i8(m[1]): + m = s + # print("width", i8(s[0])) + # print("height", i8(s[1])) + # print("colors", i8(s[2])) + # print("reserved", i8(s[3])) + # print("hotspot x", i16(s[4:])) + # print("hotspot y", i16(s[6:])) + # print("bytes", i32(s[8:])) + # print("offset", i32(s[12:])) + if not m: + raise TypeError("No cursors were found") + + # load as bitmap + self._bitmap(i32(m[12:]) + offset) + + # patch up the bitmap height + self.size = self.size[0], self.size[1]//2 + d, e, o, a = self.tile[0] + self.tile[0] = d, (0, 0)+self.size, o, a + + return + + +# +# -------------------------------------------------------------------- + +Image.register_open(CurImageFile.format, CurImageFile, _accept) + +Image.register_extension(CurImageFile.format, ".cur") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf311b4ea87205e1ebad48264fd4909d205bf84a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/CurImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..2045927596f63f68b606cbeac05b04e0d8505367 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.py @@ -0,0 +1,87 @@ +# +# The Python Imaging Library. +# $Id$ +# +# DCX file handling +# +# DCX is a container file format defined by Intel, commonly used +# for fax applications. Each DCX file consists of a directory +# (a list of file offsets) followed by a set of (usually 1-bit) +# PCX files. +# +# History: +# 1995-09-09 fl Created +# 1996-03-20 fl Properly derived from PcxImageFile. +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2002-07-30 fl Fixed file handling +# +# Copyright (c) 1997-98 by Secret Labs AB. +# Copyright (c) 1995-96 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._binary import i32le as i32 +from .PcxImagePlugin import PcxImageFile + +__version__ = "0.2" + +MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then? + + +def _accept(prefix): + return len(prefix) >= 4 and i32(prefix) == MAGIC + + +## +# Image plugin for the Intel DCX format. 
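A short usage sketch (illustrative, not part of the vendored file; the filename is hypothetical). Because a DCX file is just a directory of PCX pages, the plugin exposes them through the standard multi-frame interface:

from PIL import Image

im = Image.open("fax.dcx")
print(im.n_frames, im.is_animated)   # number of offsets read from the directory
for page in range(im.n_frames):
    im.seek(page)                    # re-runs the PCX parser at _offset[page]
    im.load()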
+ +class DcxImageFile(PcxImageFile): + + format = "DCX" + format_description = "Intel DCX" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Header + s = self.fp.read(4) + if i32(s) != MAGIC: + raise SyntaxError("not a DCX file") + + # Component directory + self._offset = [] + for i in range(1024): + offset = i32(self.fp.read(4)) + if not offset: + break + self._offset.append(offset) + + self.__fp = self.fp + self.frame = None + self.seek(0) + + @property + def n_frames(self): + return len(self._offset) + + @property + def is_animated(self): + return len(self._offset) > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + self.frame = frame + self.fp = self.__fp + self.fp.seek(self._offset[frame]) + PcxImageFile._open(self) + + def tell(self): + return self.frame + + +Image.register_open(DcxImageFile.format, DcxImageFile, _accept) + +Image.register_extension(DcxImageFile.format, ".dcx") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..630f0f7025acdf3c091cf79b97941554cb5482fd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DcxImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DdsImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DdsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..e755f94b9d97ca892506d2d4f629e9d4ba892a0c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/DdsImagePlugin.py @@ -0,0 +1,172 @@ +""" +A Pillow loader for .dds files (S3TC-compressed aka DXTC) +Jerome Leclanche + +Documentation: + https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ +""" + +import struct +from io import BytesIO +from . 
import Image, ImageFile + + +# Magic ("DDS ") +DDS_MAGIC = 0x20534444 + +# DDS flags +DDSD_CAPS = 0x1 +DDSD_HEIGHT = 0x2 +DDSD_WIDTH = 0x4 +DDSD_PITCH = 0x8 +DDSD_PIXELFORMAT = 0x1000 +DDSD_MIPMAPCOUNT = 0x20000 +DDSD_LINEARSIZE = 0x80000 +DDSD_DEPTH = 0x800000 + +# DDS caps +DDSCAPS_COMPLEX = 0x8 +DDSCAPS_TEXTURE = 0x1000 +DDSCAPS_MIPMAP = 0x400000 + +DDSCAPS2_CUBEMAP = 0x200 +DDSCAPS2_CUBEMAP_POSITIVEX = 0x400 +DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800 +DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000 +DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000 +DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000 +DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000 +DDSCAPS2_VOLUME = 0x200000 + +# Pixel Format +DDPF_ALPHAPIXELS = 0x1 +DDPF_ALPHA = 0x2 +DDPF_FOURCC = 0x4 +DDPF_PALETTEINDEXED8 = 0x20 +DDPF_RGB = 0x40 +DDPF_LUMINANCE = 0x20000 + + +# dds.h + +DDS_FOURCC = DDPF_FOURCC +DDS_RGB = DDPF_RGB +DDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS +DDS_LUMINANCE = DDPF_LUMINANCE +DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS +DDS_ALPHA = DDPF_ALPHA +DDS_PAL8 = DDPF_PALETTEINDEXED8 + +DDS_HEADER_FLAGS_TEXTURE = (DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | + DDSD_PIXELFORMAT) +DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT +DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH +DDS_HEADER_FLAGS_PITCH = DDSD_PITCH +DDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE + +DDS_HEIGHT = DDSD_HEIGHT +DDS_WIDTH = DDSD_WIDTH + +DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE +DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP +DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX + +DDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEX +DDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX +DDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY +DDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY +DDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ +DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ + + +# DXT1 +DXT1_FOURCC = 0x31545844 + +# DXT3 +DXT3_FOURCC = 0x33545844 + +# DXT5 +DXT5_FOURCC = 0x35545844 + + +# dxgiformat.h + +DXGI_FORMAT_BC7_TYPELESS = 97 +DXGI_FORMAT_BC7_UNORM = 98 +DXGI_FORMAT_BC7_UNORM_SRGB = 99 + + +class DdsImageFile(ImageFile.ImageFile): + format = "DDS" + format_description = "DirectDraw Surface" + + def _open(self): + magic, header_size = struct.unpack(" 0: + s = fp.read(min(lengthfile, 100*1024)) + if not s: + break + lengthfile -= len(s) + f.write(s) + + # Build ghostscript command + command = ["gs", + "-q", # quiet mode + "-g%dx%d" % size, # set output geometry (pixels) + "-r%fx%f" % res, # set input DPI (dots per inch) + "-dBATCH", # exit after processing + "-dNOPAUSE", # don't pause between pages, + "-dSAFER", # safe mode + "-sDEVICE=ppmraw", # ppm driver + "-sOutputFile=%s" % outfile, # output file + "-c", "%d %d translate" % (-bbox[0], -bbox[1]), + # adjust for image origin + "-f", infile, # input file + "-c", "showpage", # showpage (see: https://bugs.ghostscript.com/show_bug.cgi?id=698272) + ] + + if gs_windows_binary is not None: + if not gs_windows_binary: + raise WindowsError('Unable to locate Ghostscript on paths') + command[0] = gs_windows_binary + + # push data through ghostscript + try: + with open(os.devnull, 'w+b') as devnull: + subprocess.check_call(command, stdin=devnull, stdout=devnull) + im = Image.open(outfile) + im.load() + finally: + try: + os.unlink(outfile) + if infile_temp: + os.unlink(infile_temp) + except OSError: + pass + + return im.im.copy() + + +class PSFile(object): + """ + Wrapper for bytesio object that treats either CR or LF as end of line. 
+ """ + def __init__(self, fp): + self.fp = fp + self.char = None + + def seek(self, offset, whence=0): + self.char = None + self.fp.seek(offset, whence) + + def readline(self): + s = self.char or b"" + self.char = None + + c = self.fp.read(1) + while c not in b"\r\n": + s = s + c + c = self.fp.read(1) + + self.char = self.fp.read(1) + # line endings can be 1 or 2 of \r \n, in either order + if self.char in b"\r\n": + self.char = None + + return s.decode('latin-1') + + +def _accept(prefix): + return prefix[:4] == b"%!PS" or \ + (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) + +## +# Image plugin for Encapsulated Postscript. This plugin supports only +# a few variants of this format. + + +class EpsImageFile(ImageFile.ImageFile): + """EPS File Parser for the Python Imaging Library""" + + format = "EPS" + format_description = "Encapsulated Postscript" + + mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"} + + def _open(self): + (length, offset) = self._find_offset(self.fp) + + # Rewrap the open file pointer in something that will + # convert line endings and decode to latin-1. + try: + if py3: + # Python3, can use bare open command. + fp = open(self.fp.name, "Ur", encoding='latin-1') + else: + # Python2, no encoding conversion necessary + fp = open(self.fp.name, "Ur") + except: + # Expect this for bytesio/stringio + fp = PSFile(self.fp) + + # go to offset - start of "%!PS" + fp.seek(offset) + + box = None + + self.mode = "RGB" + self.size = 1, 1 # FIXME: huh? + + # + # Load EPS header + + s_raw = fp.readline() + s = s_raw.strip('\r\n') + + while s_raw: + if s: + if len(s) > 255: + raise SyntaxError("not an EPS file") + + try: + m = split.match(s) + except re.error as v: + raise SyntaxError("not an EPS file") + + if m: + k, v = m.group(1, 2) + self.info[k] = v + if k == "BoundingBox": + try: + # Note: The DSC spec says that BoundingBox + # fields should be integers, but some drivers + # put floating point values there anyway. + box = [int(float(i)) for i in v.split()] + self.size = box[2] - box[0], box[3] - box[1] + self.tile = [("eps", (0, 0) + self.size, offset, + (length, box))] + except: + pass + + else: + m = field.match(s) + if m: + k = m.group(1) + + if k == "EndComments": + break + if k[:8] == "PS-Adobe": + self.info[k[:8]] = k[9:] + else: + self.info[k] = "" + elif s[0] == '%': + # handle non-DSC Postscript comments that some + # tools mistakenly put in the Comments section + pass + else: + raise IOError("bad EPS header") + + s_raw = fp.readline() + s = s_raw.strip('\r\n') + + if s and s[:1] != "%": + break + + # + # Scan for an "ImageData" descriptor + + while s[:1] == "%": + + if len(s) > 255: + raise SyntaxError("not an EPS file") + + if s[:11] == "%ImageData:": + # Encoded bitmapped image. 
+ x, y, bi, mo = s[11:].split(None, 7)[:4] + + if int(bi) != 8: + break + try: + self.mode = self.mode_map[int(mo)] + except ValueError: + break + + self.size = int(x), int(y) + return + + s = fp.readline().strip('\r\n') + if not s: + break + + if not box: + raise IOError("cannot determine EPS bounding box") + + def _find_offset(self, fp): + + s = fp.read(160) + + if s[:4] == b"%!PS": + # for HEAD without binary preview + fp.seek(0, 2) + length = fp.tell() + offset = 0 + elif i32(s[0:4]) == 0xC6D3D0C5: + # FIX for: Some EPS file not handled correctly / issue #302 + # EPS can contain binary data + # or start directly with latin coding + # more info see: + # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf + offset = i32(s[4:8]) + length = i32(s[8:12]) + else: + raise SyntaxError("not an EPS file") + + return (length, offset) + + def load(self, scale=1): + # Load EPS via Ghostscript + if not self.tile: + return + self.im = Ghostscript(self.tile, self.size, self.fp, scale) + self.mode = self.im.mode + self.size = self.im.size + self.tile = [] + + def load_seek(self, *args, **kwargs): + # we can't incrementally load, so force ImageFile.parser to + # use our custom load method by defining this method. + pass + + +# +# -------------------------------------------------------------------- + +def _save(im, fp, filename, eps=1): + """EPS Writer for the Python Imaging Library.""" + + # + # make sure image data is available + im.load() + + # + # determine postscript image mode + if im.mode == "L": + operator = (8, 1, "image") + elif im.mode == "RGB": + operator = (8, 3, "false 3 colorimage") + elif im.mode == "CMYK": + operator = (8, 4, "false 4 colorimage") + else: + raise ValueError("image mode is not supported") + + class NoCloseStream(object): + def __init__(self, fp): + self.fp = fp + + def __getattr__(self, name): + return getattr(self.fp, name) + + def close(self): + pass + + base_fp = fp + if fp != sys.stdout: + fp = NoCloseStream(fp) + if sys.version_info.major > 2: + fp = io.TextIOWrapper(fp, encoding='latin-1') + + if eps: + # + # write EPS header + fp.write("%!PS-Adobe-3.0 EPSF-3.0\n") + fp.write("%%Creator: PIL 0.1 EpsEncode\n") + # fp.write("%%CreationDate: %s"...) 
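+        # Note: "%%%%" in the format strings below collapses to a literal "%%"
+        # after %-formatting, which is what DSC comments such as %%BoundingBox
+        # and %%ImageData require.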
+ fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size) + fp.write("%%Pages: 1\n") + fp.write("%%EndComments\n") + fp.write("%%Page: 1 1\n") + fp.write("%%ImageData: %d %d " % im.size) + fp.write("%d %d 0 1 1 \"%s\"\n" % operator) + + # + # image header + fp.write("gsave\n") + fp.write("10 dict begin\n") + fp.write("/buf %d string def\n" % (im.size[0] * operator[1])) + fp.write("%d %d scale\n" % im.size) + fp.write("%d %d 8\n" % im.size) # <= bits + fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) + fp.write("{ currentfile buf readhexstring pop } bind\n") + fp.write(operator[2] + "\n") + if hasattr(fp, "flush"): + fp.flush() + + ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)]) + + fp.write("\n%%%%EndBinary\n") + fp.write("grestore end\n") + if hasattr(fp, "flush"): + fp.flush() + +# +# -------------------------------------------------------------------- + + +Image.register_open(EpsImageFile.format, EpsImageFile, _accept) + +Image.register_save(EpsImageFile.format, _save) + +Image.register_extensions(EpsImageFile.format, [".ps", ".eps"]) + +Image.register_mime(EpsImageFile.format, "application/postscript") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/EpsImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/EpsImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cd055061dfd5a34be479106bb566e7cf931a3a5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/EpsImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ad26bcc1dbefcf382d8358b1f50f08f3145598 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.py @@ -0,0 +1,315 @@ +# +# The Python Imaging Library. +# $Id$ +# +# EXIF tags +# +# Copyright (c) 2003 by Secret Labs AB +# +# See the README file for information on usage and redistribution. +# + +## +# This module provides constants and clear-text names for various +# well-known EXIF tags. +## + +## +# Maps EXIF tags to tag names. 
+ +TAGS = { + + # possibly incomplete + 0x000b: "ProcessingSoftware", + 0x00fe: "NewSubfileType", + 0x00ff: "SubfileType", + 0x0100: "ImageWidth", + 0x0101: "ImageLength", + 0x0102: "BitsPerSample", + 0x0103: "Compression", + 0x0106: "PhotometricInterpretation", + 0x0107: "Thresholding", + 0x0108: "CellWidth", + 0x0109: "CellLength", + 0x010a: "FillOrder", + 0x010d: "DocumentName", + 0x010e: "ImageDescription", + 0x010f: "Make", + 0x0110: "Model", + 0x0111: "StripOffsets", + 0x0112: "Orientation", + 0x0115: "SamplesPerPixel", + 0x0116: "RowsPerStrip", + 0x0117: "StripByteCounts", + 0x0118: "MinSampleValue", + 0x0119: "MaxSampleValue", + 0x011a: "XResolution", + 0x011b: "YResolution", + 0x011c: "PlanarConfiguration", + 0x011d: "PageName", + 0x0120: "FreeOffsets", + 0x0121: "FreeByteCounts", + 0x0122: "GrayResponseUnit", + 0x0123: "GrayResponseCurve", + 0x0124: "T4Options", + 0x0125: "T6Options", + 0x0128: "ResolutionUnit", + 0x0129: "PageNumber", + 0x012d: "TransferFunction", + 0x0131: "Software", + 0x0132: "DateTime", + 0x013b: "Artist", + 0x013c: "HostComputer", + 0x013d: "Predictor", + 0x013e: "WhitePoint", + 0x013f: "PrimaryChromaticities", + 0x0140: "ColorMap", + 0x0141: "HalftoneHints", + 0x0142: "TileWidth", + 0x0143: "TileLength", + 0x0144: "TileOffsets", + 0x0145: "TileByteCounts", + 0x014a: "SubIFDs", + 0x014c: "InkSet", + 0x014d: "InkNames", + 0x014e: "NumberOfInks", + 0x0150: "DotRange", + 0x0151: "TargetPrinter", + 0x0152: "ExtraSamples", + 0x0153: "SampleFormat", + 0x0154: "SMinSampleValue", + 0x0155: "SMaxSampleValue", + 0x0156: "TransferRange", + 0x0157: "ClipPath", + 0x0158: "XClipPathUnits", + 0x0159: "YClipPathUnits", + 0x015a: "Indexed", + 0x015b: "JPEGTables", + 0x015f: "OPIProxy", + 0x0200: "JPEGProc", + 0x0201: "JpegIFOffset", + 0x0202: "JpegIFByteCount", + 0x0203: "JpegRestartInterval", + 0x0205: "JpegLosslessPredictors", + 0x0206: "JpegPointTransforms", + 0x0207: "JpegQTables", + 0x0208: "JpegDCTables", + 0x0209: "JpegACTables", + 0x0211: "YCbCrCoefficients", + 0x0212: "YCbCrSubSampling", + 0x0213: "YCbCrPositioning", + 0x0214: "ReferenceBlackWhite", + 0x02bc: "XMLPacket", + 0x1000: "RelatedImageFileFormat", + 0x1001: "RelatedImageWidth", + 0x1002: "RelatedImageLength", + 0x4746: "Rating", + 0x4749: "RatingPercent", + 0x800d: "ImageID", + 0x828d: "CFARepeatPatternDim", + 0x828e: "CFAPattern", + 0x828f: "BatteryLevel", + 0x8298: "Copyright", + 0x829a: "ExposureTime", + 0x829d: "FNumber", + 0x83bb: "IPTCNAA", + 0x8649: "ImageResources", + 0x8769: "ExifOffset", + 0x8773: "InterColorProfile", + 0x8822: "ExposureProgram", + 0x8824: "SpectralSensitivity", + 0x8825: "GPSInfo", + 0x8827: "ISOSpeedRatings", + 0x8828: "OECF", + 0x8829: "Interlace", + 0x882a: "TimeZoneOffset", + 0x882b: "SelfTimerMode", + 0x9000: "ExifVersion", + 0x9003: "DateTimeOriginal", + 0x9004: "DateTimeDigitized", + 0x9101: "ComponentsConfiguration", + 0x9102: "CompressedBitsPerPixel", + 0x9201: "ShutterSpeedValue", + 0x9202: "ApertureValue", + 0x9203: "BrightnessValue", + 0x9204: "ExposureBiasValue", + 0x9205: "MaxApertureValue", + 0x9206: "SubjectDistance", + 0x9207: "MeteringMode", + 0x9208: "LightSource", + 0x9209: "Flash", + 0x920a: "FocalLength", + 0x920b: "FlashEnergy", + 0x920c: "SpatialFrequencyResponse", + 0x920d: "Noise", + 0x9211: "ImageNumber", + 0x9212: "SecurityClassification", + 0x9213: "ImageHistory", + 0x9214: "SubjectLocation", + 0x9215: "ExposureIndex", + 0x9216: "TIFF/EPStandardID", + 0x927c: "MakerNote", + 0x9286: "UserComment", + 0x9290: "SubsecTime", + 0x9291: 
"SubsecTimeOriginal", + 0x9292: "SubsecTimeDigitized", + 0x9c9b: "XPTitle", + 0x9c9c: "XPComment", + 0x9c9d: "XPAuthor", + 0x9c9e: "XPKeywords", + 0x9c9f: "XPSubject", + 0xa000: "FlashPixVersion", + 0xa001: "ColorSpace", + 0xa002: "ExifImageWidth", + 0xa003: "ExifImageHeight", + 0xa004: "RelatedSoundFile", + 0xa005: "ExifInteroperabilityOffset", + 0xa20b: "FlashEnergy", + 0xa20c: "SpatialFrequencyResponse", + 0xa20e: "FocalPlaneXResolution", + 0xa20f: "FocalPlaneYResolution", + 0xa210: "FocalPlaneResolutionUnit", + 0xa214: "SubjectLocation", + 0xa215: "ExposureIndex", + 0xa217: "SensingMethod", + 0xa300: "FileSource", + 0xa301: "SceneType", + 0xa302: "CFAPattern", + 0xa401: "CustomRendered", + 0xa402: "ExposureMode", + 0xa403: "WhiteBalance", + 0xa404: "DigitalZoomRatio", + 0xa405: "FocalLengthIn35mmFilm", + 0xa406: "SceneCaptureType", + 0xa407: "GainControl", + 0xa408: "Contrast", + 0xa409: "Saturation", + 0xa40a: "Sharpness", + 0xa40b: "DeviceSettingDescription", + 0xa40c: "SubjectDistanceRange", + 0xa420: "ImageUniqueID", + 0xa430: "CameraOwnerName", + 0xa431: "BodySerialNumber", + 0xa432: "LensSpecification", + 0xa433: "LensMake", + 0xa434: "LensModel", + 0xa435: "LensSerialNumber", + 0xa500: "Gamma", + 0xc4a5: "PrintImageMatching", + 0xc612: "DNGVersion", + 0xc613: "DNGBackwardVersion", + 0xc614: "UniqueCameraModel", + 0xc615: "LocalizedCameraModel", + 0xc616: "CFAPlaneColor", + 0xc617: "CFALayout", + 0xc618: "LinearizationTable", + 0xc619: "BlackLevelRepeatDim", + 0xc61a: "BlackLevel", + 0xc61b: "BlackLevelDeltaH", + 0xc61c: "BlackLevelDeltaV", + 0xc61d: "WhiteLevel", + 0xc61e: "DefaultScale", + 0xc61f: "DefaultCropOrigin", + 0xc620: "DefaultCropSize", + 0xc621: "ColorMatrix1", + 0xc622: "ColorMatrix2", + 0xc623: "CameraCalibration1", + 0xc624: "CameraCalibration2", + 0xc625: "ReductionMatrix1", + 0xc626: "ReductionMatrix2", + 0xc627: "AnalogBalance", + 0xc628: "AsShotNeutral", + 0xc629: "AsShotWhiteXY", + 0xc62a: "BaselineExposure", + 0xc62b: "BaselineNoise", + 0xc62c: "BaselineSharpness", + 0xc62d: "BayerGreenSplit", + 0xc62e: "LinearResponseLimit", + 0xc62f: "CameraSerialNumber", + 0xc630: "LensInfo", + 0xc631: "ChromaBlurRadius", + 0xc632: "AntiAliasStrength", + 0xc633: "ShadowScale", + 0xc634: "DNGPrivateData", + 0xc635: "MakerNoteSafety", + 0xc65a: "CalibrationIlluminant1", + 0xc65b: "CalibrationIlluminant2", + 0xc65c: "BestQualityScale", + 0xc65d: "RawDataUniqueID", + 0xc68b: "OriginalRawFileName", + 0xc68c: "OriginalRawFileData", + 0xc68d: "ActiveArea", + 0xc68e: "MaskedAreas", + 0xc68f: "AsShotICCProfile", + 0xc690: "AsShotPreProfileMatrix", + 0xc691: "CurrentICCProfile", + 0xc692: "CurrentPreProfileMatrix", + 0xc6bf: "ColorimetricReference", + 0xc6f3: "CameraCalibrationSignature", + 0xc6f4: "ProfileCalibrationSignature", + 0xc6f6: "AsShotProfileName", + 0xc6f7: "NoiseReductionApplied", + 0xc6f8: "ProfileName", + 0xc6f9: "ProfileHueSatMapDims", + 0xc6fa: "ProfileHueSatMapData1", + 0xc6fb: "ProfileHueSatMapData2", + 0xc6fc: "ProfileToneCurve", + 0xc6fd: "ProfileEmbedPolicy", + 0xc6fe: "ProfileCopyright", + 0xc714: "ForwardMatrix1", + 0xc715: "ForwardMatrix2", + 0xc716: "PreviewApplicationName", + 0xc717: "PreviewApplicationVersion", + 0xc718: "PreviewSettingsName", + 0xc719: "PreviewSettingsDigest", + 0xc71a: "PreviewColorSpace", + 0xc71b: "PreviewDateTime", + 0xc71c: "RawImageDigest", + 0xc71d: "OriginalRawFileDigest", + 0xc71e: "SubTileBlockSize", + 0xc71f: "RowInterleaveFactor", + 0xc725: "ProfileLookTableDims", + 0xc726: "ProfileLookTableData", + 0xc740: "OpcodeList1", + 
0xc741: "OpcodeList2", + 0xc74e: "OpcodeList3", + 0xc761: "NoiseProfile" +} + +## +# Maps EXIF GPS tags to tag names. + +GPSTAGS = { + 0: "GPSVersionID", + 1: "GPSLatitudeRef", + 2: "GPSLatitude", + 3: "GPSLongitudeRef", + 4: "GPSLongitude", + 5: "GPSAltitudeRef", + 6: "GPSAltitude", + 7: "GPSTimeStamp", + 8: "GPSSatellites", + 9: "GPSStatus", + 10: "GPSMeasureMode", + 11: "GPSDOP", + 12: "GPSSpeedRef", + 13: "GPSSpeed", + 14: "GPSTrackRef", + 15: "GPSTrack", + 16: "GPSImgDirectionRef", + 17: "GPSImgDirection", + 18: "GPSMapDatum", + 19: "GPSDestLatitudeRef", + 20: "GPSDestLatitude", + 21: "GPSDestLongitudeRef", + 22: "GPSDestLongitude", + 23: "GPSDestBearingRef", + 24: "GPSDestBearing", + 25: "GPSDestDistanceRef", + 26: "GPSDestDistance", + 27: "GPSProcessingMethod", + 28: "GPSAreaInformation", + 29: "GPSDateStamp", + 30: "GPSDifferential", + 31: "GPSHPositioningError", +} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a3812d859b80fe0a7feb82da11e7160ec322baa Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ExifTags.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..be926cadb84ca3eeb33254f718e1be043a61c529 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library +# $Id$ +# +# FITS stub adapter +# +# Copyright (c) 1998-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific FITS image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:6] == b"SIMPLE" + + +class FITSStubImageFile(ImageFile.StubImageFile): + + format = "FITS" + format_description = "FITS" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(6)): + raise SyntaxError("Not a FITS file") + + # FIXME: add more sanity checks here; mandatory header items + # include SIMPLE, BITPIX, NAXIS, etc. 
+ + self.fp.seek(offset) + + # make something up + self.mode = "F" + self.size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise IOError("FITS save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept) +Image.register_save(FITSStubImageFile.format, _save) + +Image.register_extensions(FITSStubImageFile.format, [".fit", ".fits"]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eba21b6246394deba661e7d08354c23683236273 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FitsStubImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..2c190b6354fa7e312901fef121b01d6844ab8dd8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.py @@ -0,0 +1,163 @@ +# +# The Python Imaging Library. +# $Id$ +# +# FLI/FLC file handling. +# +# History: +# 95-09-01 fl Created +# 97-01-03 fl Fixed parser, setup decoder tile +# 98-07-15 fl Renamed offset attribute to avoid name clash +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, i32le as i32, o8 + +__version__ = "0.2" + + +# +# decoder + +def _accept(prefix): + return len(prefix) >= 6 and i16(prefix[4:6]) in [0xAF11, 0xAF12] + + +## +# Image plugin for the FLI/FLC animation format. Use the seek +# method to load individual frames. 
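A usage sketch (illustrative, not part of the vendored file; the animation filename is hypothetical). Frames are decoded strictly in order, so seeking backwards replays the file from frame 0:

from PIL import Image

im = Image.open("anim.flc")
print(im.n_frames, im.info["duration"])   # frame count and per-frame delay in ms
frames = []
for f in range(im.n_frames):
    im.seek(f)
    frames.append(im.convert("RGB"))      # copy each palette-mapped frame out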
+ +class FliImageFile(ImageFile.ImageFile): + + format = "FLI" + format_description = "Autodesk FLI/FLC Animation" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # HEAD + s = self.fp.read(128) + magic = i16(s[4:6]) + if not (magic in [0xAF11, 0xAF12] and + i16(s[14:16]) in [0, 3] and # flags + s[20:22] == b"\x00\x00"): # reserved + raise SyntaxError("not an FLI/FLC file") + + # frames + self.__framecount = i16(s[6:8]) + + # image characteristics + self.mode = "P" + self.size = i16(s[8:10]), i16(s[10:12]) + + # animation speed + duration = i32(s[16:20]) + if magic == 0xAF11: + duration = (duration * 1000) // 70 + self.info["duration"] = duration + + # look for palette + palette = [(a, a, a) for a in range(256)] + + s = self.fp.read(16) + + self.__offset = 128 + + if i16(s[4:6]) == 0xF100: + # prefix chunk; ignore it + self.__offset = self.__offset + i32(s) + s = self.fp.read(16) + + if i16(s[4:6]) == 0xF1FA: + # look for palette chunk + s = self.fp.read(6) + if i16(s[4:6]) == 11: + self._palette(palette, 2) + elif i16(s[4:6]) == 4: + self._palette(palette, 0) + + palette = [o8(r)+o8(g)+o8(b) for (r, g, b) in palette] + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + # set things up to decode first frame + self.__frame = -1 + self.__fp = self.fp + self.__rewind = self.fp.tell() + self.seek(0) + + def _palette(self, palette, shift): + # load palette + + i = 0 + for e in range(i16(self.fp.read(2))): + s = self.fp.read(2) + i = i + i8(s[0]) + n = i8(s[1]) + if n == 0: + n = 256 + s = self.fp.read(n * 3) + for n in range(0, len(s), 3): + r = i8(s[n]) << shift + g = i8(s[n+1]) << shift + b = i8(s[n+2]) << shift + palette[i] = (r, g, b) + i += 1 + + @property + def n_frames(self): + return self.__framecount + + @property + def is_animated(self): + return self.__framecount > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0) + + for f in range(self.__frame + 1, frame + 1): + self._seek(f) + + def _seek(self, frame): + if frame == 0: + self.__frame = -1 + self.__fp.seek(self.__rewind) + self.__offset = 128 + + if frame != self.__frame + 1: + raise ValueError("cannot seek to frame %d" % frame) + self.__frame = frame + + # move to next frame + self.fp = self.__fp + self.fp.seek(self.__offset) + + s = self.fp.read(4) + if not s: + raise EOFError + + framesize = i32(s) + + self.decodermaxblock = framesize + self.tile = [("fli", (0, 0)+self.size, self.__offset, None)] + + self.__offset += framesize + + def tell(self): + return self.__frame + + +# +# registry + +Image.register_open(FliImageFile.format, FliImageFile, _accept) + +Image.register_extensions(FliImageFile.format, [".fli", ".flc"]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1917ec12c46965c7b88191d2ce83b272263e722 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FliImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..46e49bc4ee106c715abff8cadf8ec10a561c3540 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.py @@ -0,0 +1,114 @@ +# +# The Python Imaging Library +# $Id$ +# +# base class for 
raster font file parsers +# +# history: +# 1997-06-05 fl created +# 1997-08-19 fl restrict image width +# +# Copyright (c) 1997-1998 by Secret Labs AB +# Copyright (c) 1997-1998 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +import os +from . import Image, _binary + +WIDTH = 800 + + +def puti16(fp, values): + # write network order (big-endian) 16-bit sequence + for v in values: + if v < 0: + v += 65536 + fp.write(_binary.o16be(v)) + + +## +# Base class for raster font file handlers. + +class FontFile(object): + + bitmap = None + + def __init__(self): + + self.info = {} + self.glyph = [None] * 256 + + def __getitem__(self, ix): + return self.glyph[ix] + + def compile(self): + "Create metrics and bitmap" + + if self.bitmap: + return + + # create bitmap large enough to hold all data + h = w = maxwidth = 0 + lines = 1 + for glyph in self: + if glyph: + d, dst, src, im = glyph + h = max(h, src[3] - src[1]) + w = w + (src[2] - src[0]) + if w > WIDTH: + lines += 1 + w = (src[2] - src[0]) + maxwidth = max(maxwidth, w) + + xsize = maxwidth + ysize = lines * h + + if xsize == 0 and ysize == 0: + return "" + + self.ysize = h + + # paste glyphs into bitmap + self.bitmap = Image.new("1", (xsize, ysize)) + self.metrics = [None] * 256 + x = y = 0 + for i in range(256): + glyph = self[i] + if glyph: + d, dst, src, im = glyph + xx = src[2] - src[0] + # yy = src[3] - src[1] + x0, y0 = x, y + x = x + xx + if x > WIDTH: + x, y = 0, y + h + x0, y0 = x, y + x = xx + s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0 + self.bitmap.paste(im.crop(src), s) + # print(chr(i), dst, s) + self.metrics[i] = d, dst, s + + def save(self, filename): + "Save font" + + self.compile() + + # font data + self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG") + + # font metrics + with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp: + fp.write(b"PILfont\n") + fp.write((";;;;;;%d;\n" % self.ysize).encode('ascii')) # HACK!!! + fp.write(b"DATA\n") + for id in range(256): + m = self.metrics[id] + if not m: + puti16(fp, [0] * 10) + else: + puti16(fp, m[0] + m[1] + m[2]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5e816410f8fed69812d139a6d2d2a5c5eb3f8c5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FontFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..d7bba42eb52e81a96465b3dc60690c69d8cebdf6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.py @@ -0,0 +1,229 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library. +# $Id$ +# +# FlashPix support for PIL +# +# History: +# 97-01-25 fl Created (reads uncompressed RGB images only) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +from . 
import Image, ImageFile +from ._binary import i32le as i32, i8 + +import olefile + +__version__ = "0.1" + +# we map from colour field tuples to (mode, rawmode) descriptors +MODES = { + # opacity + (0x00007ffe): ("A", "L"), + # monochrome + (0x00010000,): ("L", "L"), + (0x00018000, 0x00017ffe): ("RGBA", "LA"), + # photo YCC + (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"), + (0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"), + # standard RGB (NIFRGB) + (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"), + (0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA", "RGBA"), +} + + +# +# -------------------------------------------------------------------- + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for the FlashPix images. + +class FpxImageFile(ImageFile.ImageFile): + + format = "FPX" + format_description = "FlashPix" + + def _open(self): + # + # read the OLE directory and see if this is a likely + # to be a FlashPix file + + try: + self.ole = olefile.OleFileIO(self.fp) + except IOError: + raise SyntaxError("not an FPX file; invalid OLE file") + + if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B": + raise SyntaxError("not an FPX file; bad root CLSID") + + self._open_index(1) + + def _open_index(self, index=1): + # + # get the Image Contents Property Set + + prop = self.ole.getproperties([ + "Data Object Store %06d" % index, + "\005Image Contents" + ]) + + # size (highest resolution) + + self.size = prop[0x1000002], prop[0x1000003] + + size = max(self.size) + i = 1 + while size > 64: + size = size / 2 + i += 1 + self.maxid = i - 1 + + # mode. instead of using a single field for this, flashpix + # requires you to specify the mode for each channel in each + # resolution subimage, and leaves it to the decoder to make + # sure that they all match. for now, we'll cheat and assume + # that this is always the case. 
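+        # Inferred from the code below rather than from the FlashPix spec: the
+        # key 0x2000002 | (subimage_index << 16) yields the colour field list
+        # for that subimage, which is then matched against the MODES table above.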
+ + id = self.maxid << 16 + + s = prop[0x2000002 | id] + + colors = [] + for i in range(i32(s, 4)): + # note: for now, we ignore the "uncalibrated" flag + colors.append(i32(s, 8+i*4) & 0x7fffffff) + + self.mode, self.rawmode = MODES[tuple(colors)] + + # load JPEG tables, if any + self.jpeg = {} + for i in range(256): + id = 0x3000001 | (i << 16) + if id in prop: + self.jpeg[i] = prop[id] + + # print(len(self.jpeg), "tables loaded") + + self._open_subimage(1, self.maxid) + + def _open_subimage(self, index=1, subimage=0): + # + # setup tile descriptors for a given subimage + + stream = [ + "Data Object Store %06d" % index, + "Resolution %04d" % subimage, + "Subimage 0000 Header" + ] + + fp = self.ole.openstream(stream) + + # skip prefix + fp.read(28) + + # header stream + s = fp.read(36) + + size = i32(s, 4), i32(s, 8) + # tilecount = i32(s, 12) + tilesize = i32(s, 16), i32(s, 20) + # channels = i32(s, 24) + offset = i32(s, 28) + length = i32(s, 32) + + # print(size, self.mode, self.rawmode) + + if size != self.size: + raise IOError("subimage mismatch") + + # get tile descriptors + fp.seek(28 + offset) + s = fp.read(i32(s, 12) * length) + + x = y = 0 + xsize, ysize = size + xtile, ytile = tilesize + self.tile = [] + + for i in range(0, len(s), length): + + compression = i32(s, i+8) + + if compression == 0: + self.tile.append(("raw", (x, y, x+xtile, y+ytile), + i32(s, i) + 28, (self.rawmode))) + + elif compression == 1: + + # FIXME: the fill decoder is not implemented + self.tile.append(("fill", (x, y, x+xtile, y+ytile), + i32(s, i) + 28, (self.rawmode, s[12:16]))) + + elif compression == 2: + + internal_color_conversion = i8(s[14]) + jpeg_tables = i8(s[15]) + rawmode = self.rawmode + + if internal_color_conversion: + # The image is stored as usual (usually YCbCr). + if rawmode == "RGBA": + # For "RGBA", data is stored as YCbCrA based on + # negative RGB. The following trick works around + # this problem : + jpegmode, rawmode = "YCbCrK", "CMYK" + else: + jpegmode = None # let the decoder decide + + else: + # The image is stored as defined by rawmode + jpegmode = rawmode + + self.tile.append(("jpeg", (x, y, x+xtile, y+ytile), + i32(s, i) + 28, (rawmode, jpegmode))) + + # FIXME: jpeg tables are tile dependent; the prefix + # data must be placed in the tile descriptor itself! 
+ + if jpeg_tables: + self.tile_prefix = self.jpeg[jpeg_tables] + + else: + raise IOError("unknown/invalid compression") + + x = x + xtile + if x >= xsize: + x, y = 0, y + ytile + if y >= ysize: + break # isn't really required + + self.stream = stream + self.fp = None + + def load(self): + + if not self.fp: + self.fp = self.ole.openstream(self.stream[:2] + + ["Subimage 0000 Data"]) + + return ImageFile.ImageFile.load(self) + +# +# -------------------------------------------------------------------- + + +Image.register_open(FpxImageFile.format, FpxImageFile, _accept) + +Image.register_extension(FpxImageFile.format, ".fpx") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a13e675c8ade6d28ffa5d8365e0caee0493cd397 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FpxImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FtexImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FtexImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..9b9809062d2bca6b1cd06086da9b66dad7850cae --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/FtexImagePlugin.py @@ -0,0 +1,94 @@ +""" +A Pillow loader for .ftc and .ftu files (FTEX) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001 + +The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a +packed custom format called FTEX. This file format uses file extensions FTC and FTU. +* FTC files are compressed textures (using standard texture compression). +* FTU files are not compressed. +Texture File Format +The FTC and FTU texture files both use the same format. This +has the following structure: +{header} +{format_directory} +{data} +Where: +{header} = { u32:magic, u32:version, u32:width, u32:height, u32:mipmap_count, u32:format_count } + +* The "magic" number is "FTEX". +* "width" and "height" are the dimensions of the texture. +* "mipmap_count" is the number of mipmaps in the texture. +* "format_count" is the number of texture formats (different versions of the same texture) in this file. + +{format_directory} = format_count * { u32:format, u32:where } + +The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB uncompressed textures. +The texture data for a format starts at the position "where" in the file. + +Each set of texture data in the file has the following structure: +{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } } +* "mipmap_size" is the number of bytes in that mip level. For compressed textures this is the +size of the texture data compressed with DXT1. For 24 bit uncompressed textures, this is 3 * width * height. +Following this are the image bytes for that mipmap level. + +Note: All data is stored in little-Endian (Intel) byte order. +""" + +import struct +from io import BytesIO +from . 
import Image, ImageFile + + +MAGIC = b"FTEX" +FORMAT_DXT1 = 0 +FORMAT_UNCOMPRESSED = 1 + + +class FtexImageFile(ImageFile.ImageFile): + format = "FTEX" + format_description = "Texture File Format (IW2:EOC)" + + def _open(self): + magic = struct.unpack("= 8 and i32(prefix[:4]) >= 20 and i32(prefix[4:8]) in (1, 2) + + +## +# Image plugin for the GIMP brush format. + +class GbrImageFile(ImageFile.ImageFile): + + format = "GBR" + format_description = "GIMP brush file" + + def _open(self): + header_size = i32(self.fp.read(4)) + version = i32(self.fp.read(4)) + if header_size < 20: + raise SyntaxError("not a GIMP brush") + if version not in (1, 2): + raise SyntaxError("Unsupported GIMP brush version: %s" % version) + + width = i32(self.fp.read(4)) + height = i32(self.fp.read(4)) + color_depth = i32(self.fp.read(4)) + if width <= 0 or height <= 0: + raise SyntaxError("not a GIMP brush") + if color_depth not in (1, 4): + raise SyntaxError("Unsupported GIMP brush color depth: %s" % color_depth) + + if version == 1: + comment_length = header_size-20 + else: + comment_length = header_size-28 + magic_number = self.fp.read(4) + if magic_number != b'GIMP': + raise SyntaxError("not a GIMP brush, bad magic number") + self.info['spacing'] = i32(self.fp.read(4)) + + comment = self.fp.read(comment_length)[:-1] + + if color_depth == 1: + self.mode = "L" + else: + self.mode = 'RGBA' + + self.size = width, height + + self.info["comment"] = comment + + # Image might not be small + Image._decompression_bomb_check(self.size) + + # Data is an uncompressed block of w * h * bytes/pixel + self._data_size = width * height * color_depth + + def load(self): + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self._data_size)) + +# +# registry + + +Image.register_open(GbrImageFile.format, GbrImageFile, _accept) +Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GbrImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GbrImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcae8f3e2ab83a192a97ee2800267fa18789de2d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GbrImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca1e82185468870e2b8544bc90cf843b5c3aee4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.py @@ -0,0 +1,83 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GD file handling +# +# History: +# 1996-04-12 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +# NOTE: This format cannot be automatically recognized, so the +# class is not registered for use with Image.open(). To open a +# gd file, use the GdImageFile.open() function instead. + +# THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This +# implementation is provided for convenience and demonstrational +# purposes only. + + +from . import ImageFile, ImagePalette +from ._binary import i8, i16be as i16, i32be as i32 + +__version__ = "0.1" + + +## +# Image plugin for the GD uncompressed format. Note that this format +# is not supported by the standard Image.open function. 
To use +# this plugin, you have to import the GdImageFile module and +# use the GdImageFile.open function. + +class GdImageFile(ImageFile.ImageFile): + + format = "GD" + format_description = "GD uncompressed images" + + def _open(self): + + # Header + s = self.fp.read(1037) + + if not i16(s[:2]) in [65534, 65535]: + raise SyntaxError("Not a valid GD 2.x .gd file") + + self.mode = "L" # FIXME: "P" + self.size = i16(s[2:4]), i16(s[4:6]) + + trueColor = i8(s[6]) + trueColorOffset = 2 if trueColor else 0 + + # transparency index + tindex = i32(s[7+trueColorOffset:7+trueColorOffset+4]) + if tindex < 256: + self.info["transparency"] = tindex + + self.palette = ImagePalette.raw("XBGR", s[7+trueColorOffset+4:7+trueColorOffset+4+256*4]) + + self.tile = [("raw", (0, 0)+self.size, 7+trueColorOffset+4+256*4, ("L", 0, 1))] + + +def open(fp, mode="r"): + """ + Load texture from a GD image file. + + :param filename: GD file name, or an opened file handle. + :param mode: Optional mode. In this version, if the mode argument + is given, it must be "r". + :returns: An image instance. + :raises IOError: If the image could not be read. + """ + if mode != "r": + raise ValueError("bad mode") + + try: + return GdImageFile(fp) + except SyntaxError: + raise IOError("cannot identify this image file") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..936f842954f7aca1abf789b5698649acabbcf5c0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GdImageFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..1bfbb5ffdd5b5e4e98d4c2bd0b3be2871b36173a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.py @@ -0,0 +1,814 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GIF file handling +# +# History: +# 1995-09-01 fl Created +# 1996-12-14 fl Added interlace support +# 1996-12-30 fl Added animation support +# 1997-01-05 fl Added write support, fixed local colour map bug +# 1997-02-23 fl Make sure to load raster data in getdata() +# 1997-07-05 fl Support external decoder (0.4) +# 1998-07-09 fl Handle all modes when saving (0.5) +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6) +# 2001-04-17 fl Added palette optimization (0.7) +# 2002-06-06 fl Added transparency support for save (0.8) +# 2004-02-24 fl Disable interlacing for small images +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile, ImagePalette, ImageChops, ImageSequence +from ._binary import i8, i16le as i16, o8, o16le as o16 + +import itertools + +__version__ = "0.9" + + +# -------------------------------------------------------------------- +# Identify/read GIF files + +def _accept(prefix): + return prefix[:6] in [b"GIF87a", b"GIF89a"] + + +## +# Image plugin for GIF images. This plugin supports both GIF87 and +# GIF89 images. 
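+# A rough usage sketch of the reader defined below (the file name is
+# hypothetical); n_frames, is_animated and seek()/tell() give frame-level
+# access to animated GIFs:
+#
+#     from PIL import Image
+#
+#     im = Image.open("animation.gif")
+#     if im.is_animated:
+#         for frame in range(im.n_frames):
+#             im.seek(frame)
+#             im.convert("RGB").save("frame-%03d.png" % frame)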
+ +class GifImageFile(ImageFile.ImageFile): + + format = "GIF" + format_description = "Compuserve GIF" + _close_exclusive_fp_after_loading = False + + global_palette = None + + def data(self): + s = self.fp.read(1) + if s and i8(s): + return self.fp.read(i8(s)) + return None + + def _open(self): + + # Screen + s = self.fp.read(13) + if s[:6] not in [b"GIF87a", b"GIF89a"]: + raise SyntaxError("not a GIF file") + + self.info["version"] = s[:6] + self.size = i16(s[6:]), i16(s[8:]) + self.tile = [] + flags = i8(s[10]) + bits = (flags & 7) + 1 + + if flags & 128: + # get global palette + self.info["background"] = i8(s[11]) + # check if palette contains colour indices + p = self.fp.read(3 << bits) + for i in range(0, len(p), 3): + if not (i//3 == i8(p[i]) == i8(p[i+1]) == i8(p[i+2])): + p = ImagePalette.raw("RGB", p) + self.global_palette = self.palette = p + break + + self.__fp = self.fp # FIXME: hack + self.__rewind = self.fp.tell() + self._n_frames = None + self._is_animated = None + self._seek(0) # get ready to read first frame + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + try: + while True: + self.seek(self.tell() + 1) + except EOFError: + self._n_frames = self.tell() + 1 + self.seek(current) + return self._n_frames + + @property + def is_animated(self): + if self._is_animated is None: + if self._n_frames is not None: + self._is_animated = self._n_frames != 1 + else: + current = self.tell() + + try: + self.seek(1) + self._is_animated = True + except EOFError: + self._is_animated = False + + self.seek(current) + return self._is_animated + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError: + self.seek(last_frame) + raise EOFError("no more images in GIF file") + + def _seek(self, frame): + + if frame == 0: + # rewind + self.__offset = 0 + self.dispose = None + self.dispose_extent = [0, 0, 0, 0] # x0, y0, x1, y1 + self.__frame = -1 + self.__fp.seek(self.__rewind) + self._prev_im = None + self.disposal_method = 0 + else: + # ensure that the previous frame was loaded + if not self.im: + self.load() + + if frame != self.__frame + 1: + raise ValueError("cannot seek to frame %d" % frame) + self.__frame = frame + + self.tile = [] + + self.fp = self.__fp + if self.__offset: + # backup to last frame + self.fp.seek(self.__offset) + while self.data(): + pass + self.__offset = 0 + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + + from copy import copy + self.palette = copy(self.global_palette) + + while True: + + s = self.fp.read(1) + if not s or s == b";": + break + + elif s == b"!": + # + # extensions + # + s = self.fp.read(1) + block = self.data() + if i8(s) == 249: + # + # graphic control extension + # + flags = i8(block[0]) + if flags & 1: + self.info["transparency"] = i8(block[3]) + self.info["duration"] = i16(block[1:3]) * 10 + + # disposal method - find the value of bits 4 - 6 + dispose_bits = 0b00011100 & flags + dispose_bits = dispose_bits >> 2 + if dispose_bits: + # only set the dispose if it is not + # unspecified. 
I'm not sure if this is + # correct, but it seems to prevent the last + # frame from looking odd for some animations + self.disposal_method = dispose_bits + elif i8(s) == 254: + # + # comment extension + # + self.info["comment"] = block + elif i8(s) == 255: + # + # application extension + # + self.info["extension"] = block, self.fp.tell() + if block[:11] == b"NETSCAPE2.0": + block = self.data() + if len(block) >= 3 and i8(block[0]) == 1: + self.info["loop"] = i16(block[1:3]) + while self.data(): + pass + + elif s == b",": + # + # local image + # + s = self.fp.read(9) + + # extent + x0, y0 = i16(s[0:]), i16(s[2:]) + x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:]) + self.dispose_extent = x0, y0, x1, y1 + flags = i8(s[8]) + + interlace = (flags & 64) != 0 + + if flags & 128: + bits = (flags & 7) + 1 + self.palette =\ + ImagePalette.raw("RGB", self.fp.read(3 << bits)) + + # image data + bits = i8(self.fp.read(1)) + self.__offset = self.fp.tell() + self.tile = [("gif", + (x0, y0, x1, y1), + self.__offset, + (bits, interlace))] + break + + else: + pass + # raise IOError, "illegal GIF tag `%x`" % i8(s) + + try: + if self.disposal_method < 2: + # do not dispose or none specified + self.dispose = None + elif self.disposal_method == 2: + # replace with background colour + self.dispose = Image.core.fill("P", self.size, + self.info["background"]) + else: + # replace with previous contents + if self.im: + self.dispose = self.im.copy() + + # only dispose the extent in this frame + if self.dispose: + self.dispose = self._crop(self.dispose, self.dispose_extent) + except (AttributeError, KeyError): + pass + + if not self.tile: + # self.__fp = None + raise EOFError + + self.mode = "L" + if self.palette: + self.mode = "P" + + def tell(self): + return self.__frame + + def load_end(self): + ImageFile.ImageFile.load_end(self) + + # if the disposal method is 'do not dispose', transparent + # pixels should show the content of the previous frame + if self._prev_im and self.disposal_method == 1: + # we do this by pasting the updated area onto the previous + # frame which we then use as the current image content + updated = self._crop(self.im, self.dispose_extent) + self._prev_im.paste(updated, self.dispose_extent, + updated.convert('RGBA')) + self.im = self._prev_im + self._prev_im = self.im.copy() + +# -------------------------------------------------------------------- +# Write GIF files + + +RAWMODE = { + "1": "L", + "L": "L", + "P": "P" +} + + +def _normalize_mode(im, initial_call=False): + """ + Takes an image (or frame), returns an image in a mode that is appropriate + for saving in a Gif. + + It may return the original image, or it may return an image converted to + palette or 'L' mode. + + UNDONE: What is the point of mucking with the initial call palette, for + an image that shouldn't have a palette, or it would be a mode 'P' and + get returned in the RAWMODE clause. + + :param im: Image object + :param initial_call: Default false, set to true for a single frame. + :returns: Image object + """ + if im.mode in RAWMODE: + im.load() + return im + if Image.getmodebase(im.mode) == "RGB": + if initial_call: + palette_size = 256 + if im.palette: + palette_size = len(im.palette.getdata()[1]) // 3 + return im.convert("P", palette=Image.ADAPTIVE, colors=palette_size) + else: + return im.convert("P") + return im.convert("L") + + +def _normalize_palette(im, palette, info): + """ + Normalizes the palette for image. + - Sets the palette to the incoming palette, if provided. 
+ - Ensures that there's a palette for L mode images + - Optimizes the palette if necessary/desired. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... + :param info: encoderinfo + :returns: Image object + """ + source_palette = None + if palette: + # a bytes palette + if isinstance(palette, (bytes, bytearray, list)): + source_palette = bytearray(palette[:768]) + if isinstance(palette, ImagePalette.ImagePalette): + source_palette = bytearray(itertools.chain.from_iterable( + zip(palette.palette[:256], + palette.palette[256:512], + palette.palette[512:768]))) + + if im.mode == "P": + if not source_palette: + source_palette = im.im.getpalette("RGB")[:768] + else: # L-mode + if not source_palette: + source_palette = bytearray(i//3 for i in range(768)) + im.palette = ImagePalette.ImagePalette("RGB", + palette=source_palette) + + used_palette_colors = _get_optimize(im, info) + if used_palette_colors is not None: + return im.remap_palette(used_palette_colors, source_palette) + + im.palette.palette = source_palette + return im + + +def _write_single_frame(im, fp, palette): + im_out = _normalize_mode(im, True) + im_out = _normalize_palette(im_out, palette, im.encoderinfo) + + for s in _get_global_header(im_out, im.encoderinfo): + fp.write(s) + + # local image header + flags = 0 + if get_interlace(im): + flags = flags | 64 + _write_local_header(fp, im, (0, 0), flags) + + im_out.encoderconfig = (8, get_interlace(im)) + ImageFile._save(im_out, fp, [("gif", (0, 0)+im.size, 0, + RAWMODE[im_out.mode])]) + + fp.write(b"\0") # end of image data + + +def _write_multiple_frames(im, fp, palette): + + duration = im.encoderinfo.get("duration", None) + disposal = im.encoderinfo.get('disposal', None) + + im_frames = [] + frame_count = 0 + for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])): + for im_frame in ImageSequence.Iterator(imSequence): + # a copy is required here since seek can still mutate the image + im_frame = _normalize_mode(im_frame.copy()) + im_frame = _normalize_palette(im_frame, palette, im.encoderinfo) + + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo['duration'] = duration[frame_count] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + frame_count += 1 + + if im_frames: + # delta frame + previous = im_frames[-1] + if _get_palette_bytes(im_frame) == _get_palette_bytes(previous['im']): + delta = ImageChops.subtract_modulo(im_frame, + previous['im']) + else: + delta = ImageChops.subtract_modulo(im_frame.convert('RGB'), + previous['im'].convert('RGB')) + bbox = delta.getbbox() + if not bbox: + # This frame is identical to the previous frame + if duration: + previous['encoderinfo']['duration'] += encoderinfo['duration'] + continue + else: + bbox = None + im_frames.append({ + 'im': im_frame, + 'bbox': bbox, + 'encoderinfo': encoderinfo + }) + + if len(im_frames) > 1: + for frame_data in im_frames: + im_frame = frame_data['im'] + if not frame_data['bbox']: + # global header + for s in _get_global_header(im_frame, + frame_data['encoderinfo']): + fp.write(s) + offset = (0, 0) + else: + # compress difference + frame_data['encoderinfo']['include_color_table'] = True + + im_frame = im_frame.crop(frame_data['bbox']) + offset = frame_data['bbox'][:2] + _write_frame_data(fp, im_frame, offset, frame_data['encoderinfo']) + return True + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +def _save(im, fp, filename, 
save_all=False): + for k, v in im.info.items(): + im.encoderinfo.setdefault(k, v) + # header + try: + palette = im.encoderinfo["palette"] + except KeyError: + palette = None + im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True) + + if not save_all or not _write_multiple_frames(im, fp, palette): + _write_single_frame(im, fp, palette) + + fp.write(b";") # end of file + + if hasattr(fp, "flush"): + fp.flush() + + +def get_interlace(im): + interlace = im.encoderinfo.get("interlace", 1) + + # workaround for @PIL153 + if min(im.size) < 16: + interlace = 0 + + return interlace + + +def _write_local_header(fp, im, offset, flags): + transparent_color_exists = False + try: + transparency = im.encoderinfo["transparency"] + except KeyError: + pass + else: + transparency = int(transparency) + # optimize the block away if transparent color is not used + transparent_color_exists = True + + used_palette_colors = _get_optimize(im, im.encoderinfo) + if used_palette_colors is not None: + # adjust the transparency index after optimize + try: + transparency = used_palette_colors.index(transparency) + except ValueError: + transparent_color_exists = False + + if "duration" in im.encoderinfo: + duration = int(im.encoderinfo["duration"] / 10) + else: + duration = 0 + + disposal = int(im.encoderinfo.get('disposal', 0)) + + if transparent_color_exists or duration != 0 or disposal: + packed_flag = 1 if transparent_color_exists else 0 + packed_flag |= disposal << 2 + if not transparent_color_exists: + transparency = 0 + + fp.write(b"!" + + o8(249) + # extension intro + o8(4) + # length + o8(packed_flag) + # packed fields + o16(duration) + # duration + o8(transparency) + # transparency index + o8(0)) + + if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]) <= 255: + fp.write(b"!" + + o8(254) + # extension intro + o8(len(im.encoderinfo["comment"])) + + im.encoderinfo["comment"] + + o8(0)) + if "loop" in im.encoderinfo: + number_of_loops = im.encoderinfo["loop"] + fp.write(b"!" + + o8(255) + # extension intro + o8(11) + + b"NETSCAPE2.0" + + o8(3) + + o8(1) + + o16(number_of_loops) + # number of loops + o8(0)) + include_color_table = im.encoderinfo.get('include_color_table') + if include_color_table: + palette = im.encoderinfo.get("palette", None) + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + if color_table_size: + flags = flags | 128 # local color table flag + flags = flags | color_table_size + + fp.write(b"," + + o16(offset[0]) + # offset + o16(offset[1]) + + o16(im.size[0]) + # size + o16(im.size[1]) + + o8(flags)) # flags + if include_color_table and color_table_size: + fp.write(_get_header_palette(palette_bytes)) + fp.write(o8(8)) # bits + + +def _save_netpbm(im, fp, filename): + + # Unused by default. + # To use, uncomment the register_save call at the end of the file. + # + # If you need real GIF compression and/or RGB quantization, you + # can use the external NETPBM/PBMPLUS utilities. See comments + # below for information on how to enable this. 
+ + import os + from subprocess import Popen, check_call, PIPE, CalledProcessError + file = im._dump() + + with open(filename, 'wb') as f: + if im.mode != "RGB": + with open(os.devnull, 'wb') as devnull: + check_call(["ppmtogif", file], stdout=f, stderr=devnull) + else: + # Pipe ppmquant output into ppmtogif + # "ppmquant 256 %s | ppmtogif > %s" % (file, filename) + quant_cmd = ["ppmquant", "256", file] + togif_cmd = ["ppmtogif"] + with open(os.devnull, 'wb') as devnull: + quant_proc = Popen(quant_cmd, stdout=PIPE, stderr=devnull) + togif_proc = Popen(togif_cmd, stdin=quant_proc.stdout, + stdout=f, stderr=devnull) + + # Allow ppmquant to receive SIGPIPE if ppmtogif exits + quant_proc.stdout.close() + + retcode = quant_proc.wait() + if retcode: + raise CalledProcessError(retcode, quant_cmd) + + retcode = togif_proc.wait() + if retcode: + raise CalledProcessError(retcode, togif_cmd) + + try: + os.unlink(file) + except OSError: + pass + + +# Force optimization so that we can test performance against +# cases where it took lots of memory and time previously. +_FORCE_OPTIMIZE = False + + +def _get_optimize(im, info): + """ + Palette optimization is a potentially expensive operation. + + This function determines if the palette should be optimized using + some heuristics, then returns the list of palette entries in use. + + :param im: Image object + :param info: encoderinfo + :returns: list of indexes of palette entries in use, or None + """ + if im.mode in ("P", "L") and info and info.get("optimize", 0): + # Potentially expensive operation. + + # The palette saves 3 bytes per color not used, but palette + # lengths are restricted to 3*(2**N) bytes. Max saving would + # be 768 -> 6 bytes if we went all the way down to 2 colors. + # * If we're over 128 colors, we can't save any space. + # * If there aren't any holes, it's not worth collapsing. + # * If we have a 'large' image, the palette is in the noise. 
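+        # Worked example: an image that uses only 10 of its 256 palette
+        # entries can be remapped to a 16-entry palette (the next power of
+        # two), shrinking the stored colour table from 768 bytes to
+        # 16 * 3 = 48 bytes; once more than 128 entries are in use the next
+        # valid size is already 256, so nothing would be saved.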
+ + # create the new palette if not every color is used + optimise = _FORCE_OPTIMIZE or im.mode == 'L' + if optimise or im.width * im.height < 512 * 512: + # check which colors are used + used_palette_colors = [] + for i, count in enumerate(im.histogram()): + if count: + used_palette_colors.append(i) + + if optimise or (len(used_palette_colors) <= 128 and + max(used_palette_colors) > len(used_palette_colors)): + return used_palette_colors + + +def _get_color_table_size(palette_bytes): + # calculate the palette size for the header + import math + color_table_size = int(math.ceil(math.log(len(palette_bytes)//3, 2)))-1 + if color_table_size < 0: + color_table_size = 0 + return color_table_size + + +def _get_header_palette(palette_bytes): + """ + Returns the palette, null padded to the next power of 2 (*3) bytes + suitable for direct inclusion in the GIF header + + :param palette_bytes: Unpadded palette bytes, in RGBRGB form + :returns: Null padded palette + """ + color_table_size = _get_color_table_size(palette_bytes) + + # add the missing amount of bytes + # the palette has to be 2< 0: + palette_bytes += o8(0) * 3 * actual_target_size_diff + return palette_bytes + + +def _get_palette_bytes(im): + """ + Gets the palette for inclusion in the gif header + + :param im: Image object + :returns: Bytes, len<=768 suitable for inclusion in gif header + """ + return im.palette.palette + + +def _get_global_header(im, info): + """Return a list of strings representing a GIF header""" + + # Header Block + # http://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp + + version = b"87a" + for extensionKey in ["transparency", "duration", "loop", "comment"]: + if info and extensionKey in info: + if ((extensionKey == "duration" and info[extensionKey] == 0) or + (extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255))): + continue + version = b"89a" + break + else: + if im.info.get("version") == b"89a": + version = b"89a" + + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + + background = info["background"] if "background" in info else 0 + + return [ + b"GIF"+version + # signature + version + o16(im.size[0]) + # canvas width + o16(im.size[1]), # canvas height + + # Logical Screen Descriptor + # size of global color table + global color table flag + o8(color_table_size + 128), # packed fields + # background + reserved/aspect + o8(background) + o8(0), + + # Global Color Table + _get_header_palette(palette_bytes) + ] + + +def _write_frame_data(fp, im_frame, offset, params): + try: + im_frame.encoderinfo = params + + # local image header + _write_local_header(fp, im_frame, offset, 0) + + ImageFile._save(im_frame, fp, [("gif", (0, 0)+im_frame.size, 0, + RAWMODE[im_frame.mode])]) + + fp.write(b"\0") # end of image data + finally: + del im_frame.encoderinfo + +# -------------------------------------------------------------------- +# Legacy GIF utilities + + +def getheader(im, palette=None, info=None): + """ + Legacy Method to get Gif data from image. + + Warning:: May modify image data. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... 
+ :param info: encoderinfo + :returns: tuple of(list of header items, optimized palette) + + """ + used_palette_colors = _get_optimize(im, info) + + if info is None: + info = {} + + if "background" not in info and "background" in im.info: + info["background"] = im.info["background"] + + im_mod = _normalize_palette(im, palette, info) + im.palette = im_mod.palette + im.im = im_mod.im + header = _get_global_header(im, info) + + return header, used_palette_colors + + +# To specify duration, add the time in milliseconds to getdata(), +# e.g. getdata(im_frame, duration=1000) +def getdata(im, offset=(0, 0), **params): + """ + Legacy Method + + Return a list of strings representing this image. + The first string is a local image header, the rest contains + encoded image data. + + :param im: Image object + :param offset: Tuple of (x, y) pixels. Defaults to (0,0) + :param \\**params: E.g. duration or other encoder info parameters + :returns: List of Bytes containing gif encoded frame data + + """ + class Collector(object): + data = [] + + def write(self, data): + self.data.append(data) + + im.load() # make sure raster data is available + + fp = Collector() + + _write_frame_data(fp, im, offset, params) + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GifImageFile.format, GifImageFile, _accept) +Image.register_save(GifImageFile.format, _save) +Image.register_save_all(GifImageFile.format, _save_all) +Image.register_extension(GifImageFile.format, ".gif") +Image.register_mime(GifImageFile.format, "image/gif") + +# +# Uncomment the following line if you wish to use NETPBM/PBMPLUS +# instead of the built-in "uncompressed" GIF encoder + +# Image.register_save(GifImageFile.format, _save_netpbm) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73765aa39c1776df6b7dc376416d290ac4300a64 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GifImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.py new file mode 100644 index 0000000000000000000000000000000000000000..10593da24f34715211a9828f4eacd33044e76a69 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.py @@ -0,0 +1,138 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read (and render) GIMP gradient files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from math import pi, log, sin, sqrt +from ._binary import o8 + +# -------------------------------------------------------------------- +# Stuff to translate curve segments to palette values (derived from +# the corresponding code in GIMP, written by Federico Mena Quintero. +# See the GIMP distribution for more information.) 
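+# As a small worked example of the helpers defined below: linear(middle, pos)
+# maps [0, middle] onto [0, 0.5] and [middle, 1] onto [0.5, 1], so
+# linear(0.25, 0.25) == 0.5 and linear(0.25, 1.0) == 1.0; the other segment
+# functions (curved, sine, sphere_*) reshape that same 0..1 ramp.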
+# + +EPSILON = 1e-10 + + +def linear(middle, pos): + if pos <= middle: + if middle < EPSILON: + return 0.0 + else: + return 0.5 * pos / middle + else: + pos = pos - middle + middle = 1.0 - middle + if middle < EPSILON: + return 1.0 + else: + return 0.5 + 0.5 * pos / middle + + +def curved(middle, pos): + return pos ** (log(0.5) / log(max(middle, EPSILON))) + + +def sine(middle, pos): + return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0 + + +def sphere_increasing(middle, pos): + return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2) + + +def sphere_decreasing(middle, pos): + return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2) + + +SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing] + + +class GradientFile(object): + + gradient = None + + def getpalette(self, entries=256): + + palette = [] + + ix = 0 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + for i in range(entries): + + x = i / float(entries-1) + + while x1 < x: + ix += 1 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + w = x1 - x0 + + if w < EPSILON: + scale = segment(0.5, 0.5) + else: + scale = segment((xm - x0) / w, (x - x0) / w) + + # expand to RGBA + r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5)) + g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5)) + b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5)) + a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5)) + + # add to palette + palette.append(r + g + b + a) + + return b"".join(palette), "RGBA" + + +## +# File handler for GIMP's gradient format. + +class GimpGradientFile(GradientFile): + + def __init__(self, fp): + + if fp.readline()[:13] != b"GIMP Gradient": + raise SyntaxError("not a GIMP gradient file") + + line = fp.readline() + + # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do + if line.startswith(b"Name: "): + line = fp.readline().strip() + + count = int(line) + + gradient = [] + + for i in range(count): + + s = fp.readline().split() + w = [float(x) for x in s[:11]] + + x0, x1 = w[0], w[2] + xm = w[1] + rgb0 = w[3:7] + rgb1 = w[7:11] + + segment = SEGMENTS[int(s[11])] + cspace = int(s[12]) + + if cspace != 0: + raise IOError("cannot handle HSV colour space") + + gradient.append((x0, x1, xm, rgb0, rgb1, segment)) + + self.gradient = gradient diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba8f9ec190bbbfd8d06699f8da672e97c890583 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpGradientFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.py new file mode 100644 index 0000000000000000000000000000000000000000..6eef6a2ddfb01daecd591f742d635b467cb3f610 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.py @@ -0,0 +1,62 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read GIMP palette files +# +# History: +# 1997-08-23 fl Created +# 2004-09-07 fl Support GIMP 2.0 palette files. +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1997-2004. +# +# See the README file for information on usage and redistribution. +# + +import re +from ._binary import o8 + + +## +# File handler for GIMP's palette format. 
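+# The parser below expects a text file shaped roughly like the following
+# (an illustrative example, not taken from GIMP); "Key: value" and "#"
+# comment lines are skipped, and only the first three integers of each
+# remaining line are read as an RGB triple:
+#
+#     GIMP Palette
+#     Name: Example
+#     # a comment
+#       0   0   0     Black
+#     255 255 255     White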
+ +class GimpPaletteFile(object): + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [o8(i)*3 for i in range(256)] + + if fp.readline()[:12] != b"GIMP Palette": + raise SyntaxError("not a GIMP palette file") + + i = 0 + + while i <= 255: + + s = fp.readline() + + if not s: + break + # skip fields and comment lines + if re.match(br"\w+:|#", s): + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = tuple(map(int, s.split()[:3])) + if len(v) != 3: + raise ValueError("bad palette entry") + + if 0 <= i <= 255: + self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) + + i += 1 + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f60909f049f09ae5d147ffed8a0cd8ef6b874de9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GimpPaletteFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..33c8291ea22957c96cf2fc86a9e4dc4d1a2cfb4d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# GRIB stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile +from ._binary import i8 + +_handler = None + + +def register_handler(handler): + """ + Install application-specific GRIB image handler. + + :param handler: Handler object. 
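+
+    A rough usage sketch (the handler class here is hypothetical; it is
+    assumed to provide the ``open``, ``load`` and ``save`` methods that the
+    stub plugin and ``_save`` call)::
+
+        from PIL import GribStubImagePlugin
+
+        class GribHandler(object):
+            def open(self, im):
+                pass  # fill in im.size and im.mode here
+
+            def load(self, im):
+                raise NotImplementedError
+
+            def save(self, im, fp, filename):
+                raise NotImplementedError
+
+        GribStubImagePlugin.register_handler(GribHandler())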
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + +def _accept(prefix): + return prefix[0:4] == b"GRIB" and i8(prefix[7]) == 1 + + +class GribStubImageFile(ImageFile.StubImageFile): + + format = "GRIB" + format_description = "GRIB" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not a GRIB file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self.size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise IOError("GRIB save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept) +Image.register_save(GribStubImageFile.format, _save) + +Image.register_extension(GribStubImageFile.format, ".grib") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c37a0012983f61492f503db9119dd823125e12f5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/GribStubImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..de4d5bb0c971a282e41ca327c218a2de223560d8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.py @@ -0,0 +1,72 @@ +# +# The Python Imaging Library +# $Id$ +# +# HDF5 stub adapter +# +# Copyright (c) 2000-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific HDF5 image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + +def _accept(prefix): + return prefix[:8] == b"\x89HDF\r\n\x1a\n" + + +class HDF5StubImageFile(ImageFile.StubImageFile): + + format = "HDF5" + format_description = "HDF5" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not an HDF file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self.size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise IOError("HDF5 save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) +Image.register_save(HDF5StubImageFile.format, _save) + +Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9d1064a26c2f365d4f0a82c744cb6b55f10ef95 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Hdf5StubImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..dc93f6a74f8bc34ca3710c1e10a8cd99180ab494 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.py @@ -0,0 +1,374 @@ +# +# The Python Imaging Library. +# $Id$ +# +# macOS icns file decoder, based on icns.py by Bob Ippolito. +# +# history: +# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. +# +# Copyright (c) 2004 by Bob Ippolito. +# Copyright (c) 2004 by Secret Labs. +# Copyright (c) 2004 by Fredrik Lundh. +# Copyright (c) 2014 by Alastair Houghton. +# +# See the README file for information on usage and redistribution. +# + +from PIL import Image, ImageFile, PngImagePlugin +from PIL._binary import i8 +import io +import os +import shutil +import struct +import sys +import tempfile + +enable_jpeg2k = hasattr(Image.core, 'jp2klib_version') +if enable_jpeg2k: + from PIL import Jpeg2KImagePlugin + +HEADERSIZE = 8 + + +def nextheader(fobj): + return struct.unpack('>4sI', fobj.read(HEADERSIZE)) + + +def read_32t(fobj, start_length, size): + # The 128x128 icon seems to have an extra header for some reason. + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(4) + if sig != b'\x00\x00\x00\x00': + raise SyntaxError('Unknown signature, expecting 0x00000000') + return read_32(fobj, (start + 4, length - 4), size) + + +def read_32(fobj, start_length, size): + """ + Read a 32bit RGB icon resource. Seems to be either uncompressed or + an RLE packbits-like scheme. 
+ """ + (start, length) = start_length + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + if length == sizesq * 3: + # uncompressed ("RGBRGBGB") + indata = fobj.read(length) + im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) + else: + # decode image + im = Image.new("RGB", pixel_size, None) + for band_ix in range(3): + data = [] + bytesleft = sizesq + while bytesleft > 0: + byte = fobj.read(1) + if not byte: + break + byte = i8(byte) + if byte & 0x80: + blocksize = byte - 125 + byte = fobj.read(1) + for i in range(blocksize): + data.append(byte) + else: + blocksize = byte + 1 + data.append(fobj.read(blocksize)) + bytesleft -= blocksize + if bytesleft <= 0: + break + if bytesleft != 0: + raise SyntaxError( + "Error reading channel [%r left]" % bytesleft + ) + band = Image.frombuffer( + "L", pixel_size, b"".join(data), "raw", "L", 0, 1 + ) + im.im.putband(band.im, band_ix) + return {"RGB": im} + + +def read_mk(fobj, start_length, size): + # Alpha masks seem to be uncompressed + start = start_length[0] + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + band = Image.frombuffer( + "L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1 + ) + return {"A": band} + + +def read_png_or_jpeg2000(fobj, start_length, size): + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(12) + if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a': + fobj.seek(start) + im = PngImagePlugin.PngImageFile(fobj) + return {"RGBA": im} + elif sig[:4] == b'\xff\x4f\xff\x51' \ + or sig[:4] == b'\x0d\x0a\x87\x0a' \ + or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': + if not enable_jpeg2k: + raise ValueError('Unsupported icon subimage format (rebuild PIL ' + 'with JPEG 2000 support to fix this)') + # j2k, jpc or j2c + fobj.seek(start) + jp2kstream = fobj.read(length) + f = io.BytesIO(jp2kstream) + im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) + if im.mode != 'RGBA': + im = im.convert('RGBA') + return {"RGBA": im} + else: + raise ValueError('Unsupported icon subimage format') + + +class IcnsFile(object): + + SIZES = { + (512, 512, 2): [ + (b'ic10', read_png_or_jpeg2000), + ], + (512, 512, 1): [ + (b'ic09', read_png_or_jpeg2000), + ], + (256, 256, 2): [ + (b'ic14', read_png_or_jpeg2000), + ], + (256, 256, 1): [ + (b'ic08', read_png_or_jpeg2000), + ], + (128, 128, 2): [ + (b'ic13', read_png_or_jpeg2000), + ], + (128, 128, 1): [ + (b'ic07', read_png_or_jpeg2000), + (b'it32', read_32t), + (b't8mk', read_mk), + ], + (64, 64, 1): [ + (b'icp6', read_png_or_jpeg2000), + ], + (32, 32, 2): [ + (b'ic12', read_png_or_jpeg2000), + ], + (48, 48, 1): [ + (b'ih32', read_32), + (b'h8mk', read_mk), + ], + (32, 32, 1): [ + (b'icp5', read_png_or_jpeg2000), + (b'il32', read_32), + (b'l8mk', read_mk), + ], + (16, 16, 2): [ + (b'ic11', read_png_or_jpeg2000), + ], + (16, 16, 1): [ + (b'icp4', read_png_or_jpeg2000), + (b'is32', read_32), + (b's8mk', read_mk), + ], + } + + def __init__(self, fobj): + """ + fobj is a file-like object as an icns resource + """ + # signature : (start, length) + self.dct = dct = {} + self.fobj = fobj + sig, filesize = nextheader(fobj) + if sig != b'icns': + raise SyntaxError('not an icns file') + i = HEADERSIZE + while i < filesize: + sig, blocksize = nextheader(fobj) + if blocksize <= 0: + raise SyntaxError('invalid block header') + i += HEADERSIZE + blocksize -= HEADERSIZE + dct[sig] = (i, blocksize) + fobj.seek(blocksize, 1) + i += blocksize + + def itersizes(self): + sizes 
= [] + for size, fmts in self.SIZES.items(): + for (fmt, reader) in fmts: + if fmt in self.dct: + sizes.append(size) + break + return sizes + + def bestsize(self): + sizes = self.itersizes() + if not sizes: + raise SyntaxError("No 32bit icon resources found") + return max(sizes) + + def dataforsize(self, size): + """ + Get an icon resource as {channel: array}. Note that + the arrays are bottom-up like windows bitmaps and will likely + need to be flipped or transposed in some way. + """ + dct = {} + for code, reader in self.SIZES[size]: + desc = self.dct.get(code) + if desc is not None: + dct.update(reader(self.fobj, desc, size)) + return dct + + def getimage(self, size=None): + if size is None: + size = self.bestsize() + if len(size) == 2: + size = (size[0], size[1], 1) + channels = self.dataforsize(size) + + im = channels.get('RGBA', None) + if im: + return im + + im = channels.get("RGB").copy() + try: + im.putalpha(channels["A"]) + except KeyError: + pass + return im + + +## +# Image plugin for Mac OS icons. + +class IcnsImageFile(ImageFile.ImageFile): + """ + PIL image support for Mac OS .icns files. + Chooses the best resolution, but will possibly load + a different size image if you mutate the size attribute + before calling 'load'. + + The info dictionary has a key 'sizes' that is a list + of sizes that the icns file has. + """ + + format = "ICNS" + format_description = "Mac OS icns resource" + + def _open(self): + self.icns = IcnsFile(self.fp) + self.mode = 'RGBA' + self.best_size = self.icns.bestsize() + self.size = (self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2]) + self.info['sizes'] = self.icns.itersizes() + # Just use this to see if it's loaded or not yet. + self.tile = ('',) + + def load(self): + if len(self.size) == 3: + self.best_size = self.size + self.size = (self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2]) + + Image.Image.load(self) + if not self.tile: + return + self.load_prepare() + # This is likely NOT the best way to do it, but whatever. + im = self.icns.getimage(self.best_size) + + # If this is a PNG or JPEG 2000, it won't be loaded yet + im.load() + + self.im = im.im + self.mode = im.mode + self.size = im.size + self.fp = None + self.icns = None + self.tile = () + self.load_end() + + +def _save(im, fp, filename): + """ + Saves the image as a series of PNG files, + that are then converted to a .icns file + using the macOS command line utility 'iconutil'. + + macOS only. 
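+
+    A rough example (the file name is hypothetical; this save handler is
+    only registered on macOS, where the ``iconutil`` tool is available)::
+
+        from PIL import Image
+
+        im = Image.open("icon-512.png")
+        im.save("icon.icns")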
+ """ + if hasattr(fp, "flush"): + fp.flush() + + # create the temporary set of pngs + iconset = tempfile.mkdtemp('.iconset') + provided_images = {im.width: im + for im in im.encoderinfo.get("append_images", [])} + last_w = None + for w in [16, 32, 128, 256, 512]: + prefix = 'icon_{}x{}'.format(w, w) + + first_path = os.path.join(iconset, prefix+'.png') + if last_w == w: + shutil.copyfile(second_path, first_path) + else: + im_w = provided_images.get(w, im.resize((w, w), Image.LANCZOS)) + im_w.save(first_path) + + second_path = os.path.join(iconset, prefix+'@2x.png') + im_w2 = provided_images.get(w*2, im.resize((w*2, w*2), Image.LANCZOS)) + im_w2.save(second_path) + last_w = w*2 + + # iconutil -c icns -o {} {} + from subprocess import Popen, PIPE, CalledProcessError + + convert_cmd = ["iconutil", "-c", "icns", "-o", filename, iconset] + with open(os.devnull, 'wb') as devnull: + convert_proc = Popen(convert_cmd, stdout=PIPE, stderr=devnull) + + convert_proc.stdout.close() + + retcode = convert_proc.wait() + + # remove the temporary files + shutil.rmtree(iconset) + + if retcode: + raise CalledProcessError(retcode, convert_cmd) + + +Image.register_open(IcnsImageFile.format, IcnsImageFile, + lambda x: x[:4] == b'icns') +Image.register_extension(IcnsImageFile.format, '.icns') + +if sys.platform == 'darwin': + Image.register_save(IcnsImageFile.format, _save) + + Image.register_mime(IcnsImageFile.format, "image/icns") + + +if __name__ == '__main__': + + if len(sys.argv) < 2: + print("Syntax: python IcnsImagePlugin.py [file]") + sys.exit() + + imf = IcnsImageFile(open(sys.argv[1], 'rb')) + for size in imf.info['sizes']: + imf.size = size + imf.load() + im = imf.im + im.save('out-%s-%s-%s.png' % size) + im = Image.open(sys.argv[1]) + im.save("out.png") + if sys.platform == 'windows': + os.startfile("out.png") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea16ad448f67ea9c434f963ae39ca7031b48aed0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcnsImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..428fdd41a3c54b03db828eba1c50dac6943741e2 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.py @@ -0,0 +1,284 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Icon support for PIL +# +# History: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis +# . +# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki +# +# Icon format references: +# * https://en.wikipedia.org/wiki/ICO_(file_format) +# * https://msdn.microsoft.com/en-us/library/ms997538.aspx + + +import struct +from io import BytesIO + +from . 
import Image, ImageFile, BmpImagePlugin, PngImagePlugin +from ._binary import i8, i16le as i16, i32le as i32 +from math import log, ceil + +__version__ = "0.1" + +# +# -------------------------------------------------------------------- + +_MAGIC = b"\0\0\1\0" + + +def _save(im, fp, filename): + fp.write(_MAGIC) # (2+2) + sizes = im.encoderinfo.get("sizes", + [(16, 16), (24, 24), (32, 32), (48, 48), + (64, 64), (128, 128), (256, 256)]) + width, height = im.size + sizes = filter(lambda x: False if (x[0] > width or x[1] > height or + x[0] > 256 or x[1] > 256) else True, + sizes) + sizes = list(sizes) + fp.write(struct.pack("=8bpp) + 'reserved': i8(s[3]), + 'planes': i16(s[4:]), + 'bpp': i16(s[6:]), + 'size': i32(s[8:]), + 'offset': i32(s[12:]) + } + + # See Wikipedia + for j in ('width', 'height'): + if not icon_header[j]: + icon_header[j] = 256 + + # See Wikipedia notes about color depth. + # We need this just to differ images with equal sizes + icon_header['color_depth'] = (icon_header['bpp'] or + (icon_header['nb_color'] != 0 and + ceil(log(icon_header['nb_color'], + 2))) or 256) + + icon_header['dim'] = (icon_header['width'], icon_header['height']) + icon_header['square'] = (icon_header['width'] * + icon_header['height']) + + self.entry.append(icon_header) + + self.entry = sorted(self.entry, key=lambda x: x['color_depth']) + # ICO images are usually squares + # self.entry = sorted(self.entry, key=lambda x: x['width']) + self.entry = sorted(self.entry, key=lambda x: x['square']) + self.entry.reverse() + + def sizes(self): + """ + Get a list of all available icon sizes and color depths. + """ + return {(h['width'], h['height']) for h in self.entry} + + def getimage(self, size, bpp=False): + """ + Get an image from the icon + """ + for (i, h) in enumerate(self.entry): + if size == h['dim'] and (bpp is False or bpp == h['color_depth']): + return self.frame(i) + return self.frame(0) + + def frame(self, idx): + """ + Get an image from frame idx + """ + + header = self.entry[idx] + + self.buf.seek(header['offset']) + data = self.buf.read(8) + self.buf.seek(header['offset']) + + if data[:8] == PngImagePlugin._MAGIC: + # png frame + im = PngImagePlugin.PngImageFile(self.buf) + else: + # XOR + AND mask bmp frame + im = BmpImagePlugin.DibImageFile(self.buf) + + # change tile dimension to only encompass XOR image + im.size = (im.size[0], int(im.size[1] / 2)) + d, e, o, a = im.tile[0] + im.tile[0] = d, (0, 0) + im.size, o, a + + # figure out where AND mask image starts + mode = a[0] + bpp = 8 + for k, v in BmpImagePlugin.BIT2MODE.items(): + if mode == v[1]: + bpp = k + break + + if 32 == bpp: + # 32-bit color depth icon image allows semitransparent areas + # PIL's DIB format ignores transparency bits, recover them. + # The DIB is packed in BGRX byte order where X is the alpha + # channel. + + # Back up to start of bmp data + self.buf.seek(o) + # extract every 4th byte (eg. 3,7,11,15,...) 
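+            # (the [3::4] slice below keeps the X/alpha byte of each
+            # 4-byte BGRX pixel)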
+ alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4] + + # convert to an 8bpp grayscale image + mask = Image.frombuffer( + 'L', # 8bpp + im.size, # (w, h) + alpha_bytes, # source chars + 'raw', # raw decoder + ('L', 0, -1) # 8bpp inverted, unpadded, reversed + ) + else: + # get AND image from end of bitmap + w = im.size[0] + if (w % 32) > 0: + # bitmap row data is aligned to word boundaries + w += 32 - (im.size[0] % 32) + + # the total mask data is + # padded row size * height / bits per char + + and_mask_offset = o + int(im.size[0] * im.size[1] * + (bpp / 8.0)) + total_bytes = int((w * im.size[1]) / 8) + + self.buf.seek(and_mask_offset) + mask_data = self.buf.read(total_bytes) + + # convert raw data to image + mask = Image.frombuffer( + '1', # 1 bpp + im.size, # (w, h) + mask_data, # source chars + 'raw', # raw decoder + ('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed + ) + + # now we have two images, im is XOR image and mask is AND image + + # apply mask image as alpha channel + im = im.convert('RGBA') + im.putalpha(mask) + + return im + + +## +# Image plugin for Windows Icon files. + +class IcoImageFile(ImageFile.ImageFile): + """ + PIL read-only image support for Microsoft Windows .ico files. + + By default the largest resolution image in the file will be loaded. This + can be changed by altering the 'size' attribute before calling 'load'. + + The info dictionary has a key 'sizes' that is a list of the sizes available + in the icon file. + + Handles classic, XP and Vista icon formats. + + This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis + . + https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki + """ + format = "ICO" + format_description = "Windows Icon" + + def _open(self): + self.ico = IcoFile(self.fp) + self.info['sizes'] = self.ico.sizes() + self.size = self.ico.entry[0]['dim'] + self.load() + + def load(self): + im = self.ico.getimage(self.size) + # if tile is PNG, it won't really be loaded yet + im.load() + self.im = im.im + self.mode = im.mode + self.size = im.size + + def load_seek(self): + # Flag the ImageFile.Parser so that it + # just does all the decode at the end. + pass +# +# -------------------------------------------------------------------- + + +Image.register_open(IcoImageFile.format, IcoImageFile, _accept) +Image.register_save(IcoImageFile.format, _save) +Image.register_extension(IcoImageFile.format, ".ico") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a63ca58f0c11fb536ca72e469e2a07ab710cea Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IcoImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..b87fa908392d05efeadc6ddda8e77a1b66a93438 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.py @@ -0,0 +1,347 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IFUNC IM file handling for PIL +# +# history: +# 1995-09-01 fl Created. 
+# 1997-01-03 fl Save palette images +# 1997-01-08 fl Added sequence support +# 1997-01-23 fl Added P and RGB save support +# 1997-05-31 fl Read floating point images +# 1997-06-22 fl Save floating point images +# 1997-08-27 fl Read and save 1-bit images +# 1998-06-25 fl Added support for RGB+LUT images +# 1998-07-02 fl Added support for YCC images +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 1998-12-29 fl Added I;16 support +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# 2003-09-26 fl Added LA/PA support +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +import re +from . import Image, ImageFile, ImagePalette +from ._binary import i8 + +__version__ = "0.7" + + +# -------------------------------------------------------------------- +# Standard tags + +COMMENT = "Comment" +DATE = "Date" +EQUIPMENT = "Digitalization equipment" +FRAMES = "File size (no of images)" +LUT = "Lut" +NAME = "Name" +SCALE = "Scale (x,y)" +SIZE = "Image size (x*y)" +MODE = "Image type" + +TAGS = {COMMENT: 0, DATE: 0, EQUIPMENT: 0, FRAMES: 0, LUT: 0, NAME: 0, + SCALE: 0, SIZE: 0, MODE: 0} + +OPEN = { + # ifunc93/p3cfunc formats + "0 1 image": ("1", "1"), + "L 1 image": ("1", "1"), + "Greyscale image": ("L", "L"), + "Grayscale image": ("L", "L"), + "RGB image": ("RGB", "RGB;L"), + "RLB image": ("RGB", "RLB"), + "RYB image": ("RGB", "RLB"), + "B1 image": ("1", "1"), + "B2 image": ("P", "P;2"), + "B4 image": ("P", "P;4"), + "X 24 image": ("RGB", "RGB"), + "L 32 S image": ("I", "I;32"), + "L 32 F image": ("F", "F;32"), + # old p3cfunc formats + "RGB3 image": ("RGB", "RGB;T"), + "RYB3 image": ("RGB", "RYB;T"), + # extensions + "LA image": ("LA", "LA;L"), + "RGBA image": ("RGBA", "RGBA;L"), + "RGBX image": ("RGBX", "RGBX;L"), + "CMYK image": ("CMYK", "CMYK;L"), + "YCC image": ("YCbCr", "YCbCr;L"), +} + +# ifunc95 extensions +for i in ["8", "8S", "16", "16S", "32", "32F"]: + OPEN["L %s image" % i] = ("F", "F;%s" % i) + OPEN["L*%s image" % i] = ("F", "F;%s" % i) +for i in ["16", "16L", "16B"]: + OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i) + OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i) +for i in ["32S"]: + OPEN["L %s image" % i] = ("I", "I;%s" % i) + OPEN["L*%s image" % i] = ("I", "I;%s" % i) +for i in range(2, 33): + OPEN["L*%s image" % i] = ("F", "F;%s" % i) + + +# -------------------------------------------------------------------- +# Read IM directory + +split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$") + + +def number(s): + try: + return int(s) + except ValueError: + return float(s) + + +## +# Image plugin for the IFUNC IM file format. + +class ImImageFile(ImageFile.ImageFile): + + format = "IM" + format_description = "IFUNC Image Memory" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Quick rejection: if there's not an LF among the first + # 100 bytes, this is (probably) not a text header. + + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + n = 0 + + # Default values + self.info[MODE] = "L" + self.info[SIZE] = (512, 512) + self.info[FRAMES] = 1 + + self.rawmode = "L" + + while True: + + s = self.fp.read(1) + + # Some versions of IFUNC uses \n\r instead of \r\n... 
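            # Illustrative note: with "\n\r" endings, readline() below stops at
            # the "\n", so the next read(1) returns a stray b"\r" which this
            # check silently skips. A typical header line looks like
            # b"Image size (x*y): 256*256" (sample values only), which the
            # `split` regex above divides into the tag "Image size (x*y)" and
            # the value "256*256".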
+ if s == b"\r": + continue + + if not s or s == b'\0' or s == b'\x1A': + break + + # FIXME: this may read whole file if not a text file + s = s + self.fp.readline() + + if len(s) > 100: + raise SyntaxError("not an IM file") + + if s[-2:] == b'\r\n': + s = s[:-2] + elif s[-1:] == b'\n': + s = s[:-1] + + try: + m = split.match(s) + except re.error as v: + raise SyntaxError("not an IM file") + + if m: + + k, v = m.group(1, 2) + + # Don't know if this is the correct encoding, + # but a decent guess (I guess) + k = k.decode('latin-1', 'replace') + v = v.decode('latin-1', 'replace') + + # Convert value as appropriate + if k in [FRAMES, SCALE, SIZE]: + v = v.replace("*", ",") + v = tuple(map(number, v.split(","))) + if len(v) == 1: + v = v[0] + elif k == MODE and v in OPEN: + v, self.rawmode = OPEN[v] + + # Add to dictionary. Note that COMMENT tags are + # combined into a list of strings. + if k == COMMENT: + if k in self.info: + self.info[k].append(v) + else: + self.info[k] = [v] + else: + self.info[k] = v + + if k in TAGS: + n += 1 + + else: + + raise SyntaxError("Syntax error in IM header: " + + s.decode('ascii', 'replace')) + + if not n: + raise SyntaxError("Not an IM file") + + # Basic attributes + self.size = self.info[SIZE] + self.mode = self.info[MODE] + + # Skip forward to start of image data + while s and s[0:1] != b'\x1A': + s = self.fp.read(1) + if not s: + raise SyntaxError("File truncated") + + if LUT in self.info: + # convert lookup table to palette or lut attribute + palette = self.fp.read(768) + greyscale = 1 # greyscale palette + linear = 1 # linear greyscale palette + for i in range(256): + if palette[i] == palette[i+256] == palette[i+512]: + if i8(palette[i]) != i: + linear = 0 + else: + greyscale = 0 + if self.mode == "L" or self.mode == "LA": + if greyscale: + if not linear: + self.lut = [i8(c) for c in palette[:256]] + else: + if self.mode == "L": + self.mode = self.rawmode = "P" + elif self.mode == "LA": + self.mode = self.rawmode = "PA" + self.palette = ImagePalette.raw("RGB;L", palette) + elif self.mode == "RGB": + if not greyscale or not linear: + self.lut = [i8(c) for c in palette] + + self.frame = 0 + + self.__offset = offs = self.fp.tell() + + self.__fp = self.fp # FIXME: hack + + if self.rawmode[:2] == "F;": + + # ifunc95 formats + try: + # use bit decoder (if necessary) + bits = int(self.rawmode[2:]) + if bits not in [8, 16, 32]: + self.tile = [("bit", (0, 0)+self.size, offs, + (bits, 8, 3, 0, -1))] + return + except ValueError: + pass + + if self.rawmode in ["RGB;T", "RYB;T"]: + # Old LabEye/3PC files. 
Would be very surprised if anyone + # ever stumbled upon such a file ;-) + size = self.size[0] * self.size[1] + self.tile = [("raw", (0, 0)+self.size, offs, ("G", 0, -1)), + ("raw", (0, 0)+self.size, offs+size, ("R", 0, -1)), + ("raw", (0, 0)+self.size, offs+2*size, ("B", 0, -1))] + else: + # LabEye/IFUNC files + self.tile = [("raw", (0, 0)+self.size, offs, + (self.rawmode, 0, -1))] + + @property + def n_frames(self): + return self.info[FRAMES] + + @property + def is_animated(self): + return self.info[FRAMES] > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + + self.frame = frame + + if self.mode == "1": + bits = 1 + else: + bits = 8 * len(self.mode) + + size = ((self.size[0] * bits + 7) // 8) * self.size[1] + offs = self.__offset + frame * size + + self.fp = self.__fp + + self.tile = [("raw", (0, 0)+self.size, offs, (self.rawmode, 0, -1))] + + def tell(self): + return self.frame + +# +# -------------------------------------------------------------------- +# Save IM files + + +SAVE = { + # mode: (im type, raw mode) + "1": ("0 1", "1"), + "L": ("Greyscale", "L"), + "LA": ("LA", "LA;L"), + "P": ("Greyscale", "P"), + "PA": ("LA", "PA;L"), + "I": ("L 32S", "I;32S"), + "I;16": ("L 16", "I;16"), + "I;16L": ("L 16L", "I;16L"), + "I;16B": ("L 16B", "I;16B"), + "F": ("L 32F", "F;32F"), + "RGB": ("RGB", "RGB;L"), + "RGBA": ("RGBA", "RGBA;L"), + "RGBX": ("RGBX", "RGBX;L"), + "CMYK": ("CMYK", "CMYK;L"), + "YCbCr": ("YCC", "YCbCr;L") +} + + +def _save(im, fp, filename): + + try: + image_type, rawmode = SAVE[im.mode] + except KeyError: + raise ValueError("Cannot save %s images as IM" % im.mode) + + frames = im.encoderinfo.get("frames", 1) + + fp.write(("Image type: %s image\r\n" % image_type).encode('ascii')) + if filename: + fp.write(("Name: %s\r\n" % filename).encode('ascii')) + fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii')) + fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii')) + if im.mode == "P": + fp.write(b"Lut: 1\r\n") + fp.write(b"\000" * (511-fp.tell()) + b"\032") + if im.mode == "P": + fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes + ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, -1))]) + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(ImImageFile.format, ImImageFile) +Image.register_save(ImImageFile.format, _save) + +Image.register_extension(ImImageFile.format, ".im") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b8943267ab52d11a1a03c28ed3ed970bf9cbd1e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.py new file mode 100644 index 0000000000000000000000000000000000000000..c589526577b67954d1f7b97e9a8163c34520d4ef --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.py @@ -0,0 +1,2930 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# the Image class wrapper +# +# partial release history: +# 1995-09-09 fl Created +# 1996-03-11 fl PIL release 0.0 (proof of concept) +# 1996-04-30 fl PIL release 0.1b1 +# 1999-07-28 fl PIL release 1.0 final +# 2000-06-07 fl PIL release 1.1 +# 2000-10-20 fl PIL release 1.1.1 +# 2001-05-07 fl PIL release 1.1.2 +# 2002-03-15 fl PIL release 1.1.3 +# 2003-05-10 fl PIL release 1.1.4 +# 2005-03-28 fl PIL release 1.1.5 +# 2006-12-02 fl PIL release 1.1.6 +# 2009-11-15 fl PIL release 1.1.7 +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +# VERSION is deprecated and will be removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed after that. +# Use __version__ instead. +from . import VERSION, PILLOW_VERSION, __version__, _plugins +from ._util import py3 + +import logging +import warnings +import math + +logger = logging.getLogger(__name__) + + +class DecompressionBombWarning(RuntimeWarning): + pass + + +class DecompressionBombError(Exception): + pass + + +class _imaging_not_installed(object): + # module placeholder + def __getattr__(self, id): + raise ImportError("The _imaging C module is not installed") + + +# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image +MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3) + + +try: + # If the _imaging C module is not present, Pillow will not load. + # Note that other modules should not refer to _imaging directly; + # import Image and use the Image.core variable instead. + # Also note that Image.core is not a publicly documented interface, + # and should be considered private and subject to change. + from . import _imaging as core + if __version__ != getattr(core, 'PILLOW_VERSION', None): + raise ImportError("The _imaging extension was built for another " + "version of Pillow or PIL:\n" + "Core version: %s\n" + "Pillow version: %s" % + (getattr(core, 'PILLOW_VERSION', None), + __version__)) + +except ImportError as v: + core = _imaging_not_installed() + # Explanations for ways that we know we might have an import error + if str(v).startswith("Module use of python"): + # The _imaging C module is present, but not compiled for + # the right version (windows only). Print a warning, if + # possible. + warnings.warn( + "The _imaging extension was built for another version " + "of Python.", + RuntimeWarning + ) + elif str(v).startswith("The _imaging extension"): + warnings.warn(str(v), RuntimeWarning) + elif "Symbol not found: _PyUnicodeUCS2_" in str(v): + # should match _PyUnicodeUCS2_FromString and + # _PyUnicodeUCS2_AsLatin1String + warnings.warn( + "The _imaging extension was built for Python with UCS2 support; " + "recompile Pillow or build Python --without-wide-unicode. ", + RuntimeWarning + ) + elif "Symbol not found: _PyUnicodeUCS4_" in str(v): + # should match _PyUnicodeUCS4_FromString and + # _PyUnicodeUCS4_AsLatin1String + warnings.warn( + "The _imaging extension was built for Python with UCS4 support; " + "recompile Pillow or build Python --with-wide-unicode. ", + RuntimeWarning + ) + # Fail here anyway. Don't let people run with a mostly broken Pillow. + # see docs/porting.rst + raise + +try: + import builtins +except ImportError: + import __builtin__ + builtins = __builtin__ + +from . 
import ImageMode +from ._binary import i8 +from ._util import isPath, isStringType, deferred_error + +import os +import sys +import io +import struct +import atexit + +# type stuff +import numbers +try: + # Python 3 + from collections.abc import Callable +except ImportError: + # Python 2.7 + from collections import Callable + + +# works everywhere, win for pypy, not cpython +USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info') +try: + import cffi + HAS_CFFI = True +except ImportError: + HAS_CFFI = False + +try: + from pathlib import Path + HAS_PATHLIB = True +except ImportError: + try: + from pathlib2 import Path + HAS_PATHLIB = True + except ImportError: + HAS_PATHLIB = False + + +def isImageType(t): + """ + Checks if an object is an image object. + + .. warning:: + + This function is for internal use only. + + :param t: object to check if it's an image + :returns: True if the object is an image + """ + return hasattr(t, "im") + + +# +# Constants (also defined in _imagingmodule.c!) + +NONE = 0 + +# transpose +FLIP_LEFT_RIGHT = 0 +FLIP_TOP_BOTTOM = 1 +ROTATE_90 = 2 +ROTATE_180 = 3 +ROTATE_270 = 4 +TRANSPOSE = 5 +TRANSVERSE = 6 + +# transforms +AFFINE = 0 +EXTENT = 1 +PERSPECTIVE = 2 +QUAD = 3 +MESH = 4 + +# resampling filters +NEAREST = NONE = 0 +BOX = 4 +BILINEAR = LINEAR = 2 +HAMMING = 5 +BICUBIC = CUBIC = 3 +LANCZOS = ANTIALIAS = 1 + +# dithers +NEAREST = NONE = 0 +ORDERED = 1 # Not yet implemented +RASTERIZE = 2 # Not yet implemented +FLOYDSTEINBERG = 3 # default + +# palettes/quantizers +WEB = 0 +ADAPTIVE = 1 + +MEDIANCUT = 0 +MAXCOVERAGE = 1 +FASTOCTREE = 2 +LIBIMAGEQUANT = 3 + +# categories +NORMAL = 0 +SEQUENCE = 1 +CONTAINER = 2 + +if hasattr(core, 'DEFAULT_STRATEGY'): + DEFAULT_STRATEGY = core.DEFAULT_STRATEGY + FILTERED = core.FILTERED + HUFFMAN_ONLY = core.HUFFMAN_ONLY + RLE = core.RLE + FIXED = core.FIXED + + +# -------------------------------------------------------------------- +# Registries + +ID = [] +OPEN = {} +MIME = {} +SAVE = {} +SAVE_ALL = {} +EXTENSION = {} +DECODERS = {} +ENCODERS = {} + +# -------------------------------------------------------------------- +# Modes supported by this version + +_MODEINFO = { + # NOTE: this table will be removed in future versions. use + # getmode* functions or ImageMode descriptors instead. + + # official modes + "1": ("L", "L", ("1",)), + "L": ("L", "L", ("L",)), + "I": ("L", "I", ("I",)), + "F": ("L", "F", ("F",)), + "P": ("RGB", "L", ("P",)), + "RGB": ("RGB", "L", ("R", "G", "B")), + "RGBX": ("RGB", "L", ("R", "G", "B", "X")), + "RGBA": ("RGB", "L", ("R", "G", "B", "A")), + "CMYK": ("RGB", "L", ("C", "M", "Y", "K")), + "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")), + "LAB": ("RGB", "L", ("L", "A", "B")), + "HSV": ("RGB", "L", ("H", "S", "V")), + + # Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and + # BGR;24. Use these modes only if you know exactly what you're + # doing... 
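    # Illustrative note: each entry maps a mode to (base mode, band type,
    # band names); e.g. the "RGB" row above means that
    # Image.new("RGB", (1, 1)).getbands() == ("R", "G", "B").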
+ +} + +if sys.byteorder == 'little': + _ENDIAN = '<' +else: + _ENDIAN = '>' + +_MODE_CONV = { + # official modes + "1": ('|b1', None), # Bits need to be extended to bytes + "L": ('|u1', None), + "LA": ('|u1', 2), + "I": (_ENDIAN + 'i4', None), + "F": (_ENDIAN + 'f4', None), + "P": ('|u1', None), + "RGB": ('|u1', 3), + "RGBX": ('|u1', 4), + "RGBA": ('|u1', 4), + "CMYK": ('|u1', 4), + "YCbCr": ('|u1', 3), + "LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1 + "HSV": ('|u1', 3), + # I;16 == I;16L, and I;32 == I;32L + "I;16": ('u2', None), + "I;16L": ('i2', None), + "I;16LS": ('u4', None), + "I;32L": ('i4', None), + "I;32LS": ('= 1: + return + + try: + from . import BmpImagePlugin + except ImportError: + pass + try: + from . import GifImagePlugin + except ImportError: + pass + try: + from . import JpegImagePlugin + except ImportError: + pass + try: + from . import PpmImagePlugin + except ImportError: + pass + try: + from . import PngImagePlugin + except ImportError: + pass +# try: +# import TiffImagePlugin +# except ImportError: +# pass + + _initialized = 1 + + +def init(): + """ + Explicitly initializes the Python Imaging Library. This function + loads all available file format drivers. + """ + + global _initialized + if _initialized >= 2: + return 0 + + for plugin in _plugins: + try: + logger.debug("Importing %s", plugin) + __import__("PIL.%s" % plugin, globals(), locals(), []) + except ImportError as e: + logger.debug("Image: failed to import %s: %s", plugin, e) + + if OPEN or SAVE: + _initialized = 2 + return 1 + + +# -------------------------------------------------------------------- +# Codec factories (used by tobytes/frombytes and ImageFile.load) + +def _getdecoder(mode, decoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + decoder = DECODERS[decoder_name] + return decoder(mode, *args + extra) + except KeyError: + pass + try: + # get decoder + decoder = getattr(core, decoder_name + "_decoder") + # print(decoder, mode, args + extra) + return decoder(mode, *args + extra) + except AttributeError: + raise IOError("decoder %s not available" % decoder_name) + + +def _getencoder(mode, encoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + encoder = ENCODERS[encoder_name] + return encoder(mode, *args + extra) + except KeyError: + pass + try: + # get encoder + encoder = getattr(core, encoder_name + "_encoder") + # print(encoder, mode, args + extra) + return encoder(mode, *args + extra) + except AttributeError: + raise IOError("encoder %s not available" % encoder_name) + + +# -------------------------------------------------------------------- +# Simple expression analyzer + +def coerce_e(value): + return value if isinstance(value, _E) else _E(value) + + +class _E(object): + def __init__(self, data): + self.data = data + + def __add__(self, other): + return _E((self.data, "__add__", coerce_e(other).data)) + + def __mul__(self, other): + return _E((self.data, "__mul__", coerce_e(other).data)) + + +def _getscaleoffset(expr): + stub = ["stub"] + data = expr(_E(stub)).data + try: + (a, b, c) = data # simplified syntax + if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)): + return c, 0.0 + if a is stub and b == "__add__" and isinstance(c, numbers.Number): + return 1.0, c + except TypeError: + pass + try: + ((a, b, c), d, e) = data # full syntax + if (a is stub and b == "__mul__" and isinstance(c, 
numbers.Number) and + d == "__add__" and isinstance(e, numbers.Number)): + return c, e + except TypeError: + pass + raise ValueError("illegal expression") + + +# -------------------------------------------------------------------- +# Implementation wrapper + +class Image(object): + """ + This class represents an image object. To create + :py:class:`~PIL.Image.Image` objects, use the appropriate factory + functions. There's hardly ever any reason to call the Image constructor + directly. + + * :py:func:`~PIL.Image.open` + * :py:func:`~PIL.Image.new` + * :py:func:`~PIL.Image.frombytes` + """ + format = None + format_description = None + _close_exclusive_fp_after_loading = True + + def __init__(self): + # FIXME: take "new" parameters / other image? + # FIXME: turn mode and size into delegating properties? + self.im = None + self.mode = "" + self.size = (0, 0) + self.palette = None + self.info = {} + self.category = NORMAL + self.readonly = 0 + self.pyaccess = None + + @property + def width(self): + return self.size[0] + + @property + def height(self): + return self.size[1] + + def _new(self, im): + new = Image() + new.im = im + new.mode = im.mode + new.size = im.size + if im.mode in ('P', 'PA'): + if self.palette: + new.palette = self.palette.copy() + else: + from . import ImagePalette + new.palette = ImagePalette.ImagePalette() + new.info = self.info.copy() + return new + + # Context Manager Support + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + """ + Closes the file pointer, if possible. + + This operation will destroy the image core and release its memory. + The image data will be unusable afterward. + + This function is only required to close images that have not + had their file read and closed by the + :py:meth:`~PIL.Image.Image.load` method. See + :ref:`file-handling` for more information. + """ + try: + self.fp.close() + self.fp = None + except Exception as msg: + logger.debug("Error closing: %s", msg) + + if getattr(self, 'map', None): + self.map = None + + # Instead of simply setting to None, we're setting up a + # deferred error that will better explain that the core image + # object is gone. 
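        # Illustrative note: after close(), any attribute access on this
        # placeholder raises the stored ValueError ("Operation on closed
        # image"), which is clearer than an AttributeError on None.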
+ self.im = deferred_error(ValueError("Operation on closed image")) + + if sys.version_info.major >= 3: + def __del__(self): + if (hasattr(self, 'fp') and hasattr(self, '_exclusive_fp') + and self.fp and self._exclusive_fp): + self.fp.close() + self.fp = None + + def _copy(self): + self.load() + self.im = self.im.copy() + self.pyaccess = None + self.readonly = 0 + + def _ensure_mutable(self): + if self.readonly: + self._copy() + else: + self.load() + + def _dump(self, file=None, format=None, **options): + import tempfile + + suffix = '' + if format: + suffix = '.'+format + + if not file: + f, filename = tempfile.mkstemp(suffix) + os.close(f) + else: + filename = file + if not filename.endswith(suffix): + filename = filename + suffix + + self.load() + + if not format or format == "PPM": + self.im.save_ppm(filename) + else: + self.save(filename, format, **options) + + return filename + + def __eq__(self, other): + return (isinstance(other, Image) and + self.__class__.__name__ == other.__class__.__name__ and + self.mode == other.mode and + self.size == other.size and + self.info == other.info and + self.category == other.category and + self.readonly == other.readonly and + self.getpalette() == other.getpalette() and + self.tobytes() == other.tobytes()) + + def __ne__(self, other): + eq = (self == other) + return not eq + + def __repr__(self): + return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % ( + self.__class__.__module__, self.__class__.__name__, + self.mode, self.size[0], self.size[1], + id(self) + ) + + def _repr_png_(self): + """ iPython display hook support + + :returns: png version of the image as bytes + """ + from io import BytesIO + b = BytesIO() + self.save(b, 'PNG') + return b.getvalue() + + @property + def __array_interface__(self): + # numpy array interface support + new = {} + shape, typestr = _conv_type_shape(self) + new['shape'] = shape + new['typestr'] = typestr + new['version'] = 3 + if self.mode == '1': + # Binary images need to be extended from bits to bytes + # See: https://github.com/python-pillow/Pillow/issues/350 + new['data'] = self.tobytes('raw', 'L') + else: + new['data'] = self.tobytes() + return new + + def __getstate__(self): + return [ + self.info, + self.mode, + self.size, + self.getpalette(), + self.tobytes()] + + def __setstate__(self, state): + Image.__init__(self) + self.tile = [] + info, mode, size, palette, data = state + self.info = info + self.mode = mode + self.size = size + self.im = core.new(mode, size) + if mode in ("L", "P") and palette: + self.putpalette(palette) + self.frombytes(data) + + def tobytes(self, encoder_name="raw", *args): + """ + Return image as a bytes object. + + .. warning:: + + This method returns the raw image data from the internal + storage. For compressed image data (e.g. PNG, JPEG) use + :meth:`~.save`, with a BytesIO parameter for in-memory + data. + + :param encoder_name: What encoder to use. The default is to + use the standard "raw" encoder. + :param args: Extra arguments to the encoder. + :rtype: A bytes object. 
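        For example (illustrative), ``im.tobytes()`` on an "RGB" image returns
        ``3 * width * height`` raw bytes, which can be restored with
        ``Image.frombytes(im.mode, im.size, im.tobytes())``.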
+ """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if encoder_name == "raw" and args == (): + args = self.mode + + self.load() + + # unpack data + e = _getencoder(self.mode, encoder_name, args) + e.setimage(self.im) + + bufsize = max(65536, self.size[0] * 4) # see RawEncode.c + + data = [] + while True: + l, s, d = e.encode(bufsize) + data.append(d) + if s: + break + if s < 0: + raise RuntimeError("encoder error %d in tobytes" % s) + + return b"".join(data) + + def tostring(self, *args, **kw): + raise NotImplementedError("tostring() has been removed. " + "Please call tobytes() instead.") + + def tobitmap(self, name="image"): + """ + Returns the image converted to an X11 bitmap. + + .. note:: This method only works for mode "1" images. + + :param name: The name prefix to use for the bitmap variables. + :returns: A string containing an X11 bitmap. + :raises ValueError: If the mode is not "1" + """ + + self.load() + if self.mode != "1": + raise ValueError("not a bitmap") + data = self.tobytes("xbm") + return b"".join([ + ("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'), + ("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'), + ("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};" + ]) + + def frombytes(self, data, decoder_name="raw", *args): + """ + Loads this image with pixel data from a bytes object. + + This method is similar to the :py:func:`~PIL.Image.frombytes` function, + but loads data into this image instead of creating a new image object. + """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + # default format + if decoder_name == "raw" and args == (): + args = self.mode + + # unpack data + d = _getdecoder(self.mode, decoder_name, args) + d.setimage(self.im) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") + + def fromstring(self, *args, **kw): + raise NotImplementedError("fromstring() has been removed. " + "Please call frombytes() instead.") + + def load(self): + """ + Allocates storage for the image and loads the pixel data. In + normal cases, you don't need to call this method, since the + Image class automatically loads an opened image when it is + accessed for the first time. + + This method will close the file associated with the image. See + :ref:`file-handling` for more information. + + :returns: An image access object. + :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess` + """ + if self.im and self.palette and self.palette.dirty: + # realize palette + self.im.putpalette(*self.palette.getdata()) + self.palette.dirty = 0 + self.palette.mode = "RGB" + self.palette.rawmode = None + if "transparency" in self.info: + if isinstance(self.info["transparency"], int): + self.im.putpalettealpha(self.info["transparency"], 0) + else: + self.im.putpalettealphas(self.info["transparency"]) + self.palette.mode = "RGBA" + + if self.im: + if HAS_CFFI and USE_CFFI_ACCESS: + if self.pyaccess: + return self.pyaccess + from . import PyAccess + self.pyaccess = PyAccess.new(self, self.readonly) + if self.pyaccess: + return self.pyaccess + return self.im.pixel_access(self.readonly) + + def verify(self): + """ + Verifies the contents of a file. For data read from a file, this + method attempts to determine if the file is broken, without + actually decoding the image data. 
If this method finds any + problems, it raises suitable exceptions. If you need to load + the image after using this method, you must reopen the image + file. + """ + pass + + def convert(self, mode=None, matrix=None, dither=None, + palette=WEB, colors=256): + """ + Returns a converted copy of this image. For the "P" mode, this + method translates pixels through the palette. If mode is + omitted, a mode is chosen so that all information in the image + and the palette can be represented without a palette. + + The current version supports all possible conversions between + "L", "RGB" and "CMYK." The **matrix** argument only supports "L" + and "RGB". + + When translating a color image to black and white (mode "L"), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + + The default method of converting a greyscale ("L") or "RGB" + image into a bilevel (mode "1") image uses Floyd-Steinberg + dither to approximate the original image luminosity levels. If + dither is NONE, all non-zero values are set to 255 (white). To + use other thresholds, use the :py:meth:`~PIL.Image.Image.point` + method. + + :param mode: The requested mode. See: :ref:`concept-modes`. + :param matrix: An optional conversion matrix. If given, this + should be 4- or 12-tuple containing floating point values. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are NONE or FLOYDSTEINBERG (default). + :param palette: Palette to use when converting from mode "RGB" + to "P". Available palettes are WEB or ADAPTIVE. + :param colors: Number of colors to use for the ADAPTIVE palette. + Defaults to 256. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + self.load() + + if not mode and self.mode == "P": + # determine default mode + if self.palette: + mode = self.palette.mode + else: + mode = "RGB" + if not mode or (mode == self.mode and not matrix): + return self.copy() + + if matrix: + # matrix conversion + if mode not in ("L", "RGB"): + raise ValueError("illegal conversion") + im = self.im.convert_matrix(mode, matrix) + return self._new(im) + + if mode == "P" and self.mode == "RGBA": + return self.quantize(colors) + + trns = None + delete_trns = False + # transparency handling + if "transparency" in self.info and \ + self.info['transparency'] is not None: + if self.mode in ('L', 'RGB') and mode == 'RGBA': + # Use transparent conversion to promote from transparent + # color to an alpha channel. + new_im = self._new(self.im.convert_transparent( + mode, self.info['transparency'])) + del(new_im.info['transparency']) + return new_im + elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'): + t = self.info['transparency'] + if isinstance(t, bytes): + # Dragons. This can't be represented by a single color + warnings.warn('Palette images with Transparency ' + + ' expressed in bytes should be converted ' + + 'to RGBA images') + delete_trns = True + else: + # get the new transparency color. 
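                    # Illustrative note: e.g. an "L" image whose
                    # info["transparency"] is 255, converted to "RGB", yields
                    # trns == (255, 255, 255), which is re-recorded in the new
                    # image's info dict further below.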
+ # use existing conversions + trns_im = Image()._new(core.new(self.mode, (1, 1))) + if self.mode == 'P': + trns_im.putpalette(self.palette) + if isinstance(t, tuple): + try: + t = trns_im.palette.getcolor(t) + except: + raise ValueError("Couldn't allocate a palette " + "color for transparency") + trns_im.putpixel((0, 0), t) + + if mode in ('L', 'RGB'): + trns_im = trns_im.convert(mode) + else: + # can't just retrieve the palette number, got to do it + # after quantization. + trns_im = trns_im.convert('RGB') + trns = trns_im.getpixel((0, 0)) + + elif self.mode == 'P' and mode == 'RGBA': + t = self.info['transparency'] + delete_trns = True + + if isinstance(t, bytes): + self.im.putpalettealphas(t) + elif isinstance(t, int): + self.im.putpalettealpha(t, 0) + else: + raise ValueError("Transparency for P mode should" + + " be bytes or int") + + if mode == "P" and palette == ADAPTIVE: + im = self.im.quantize(colors) + new = self._new(im) + from . import ImagePalette + new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB")) + if delete_trns: + # This could possibly happen if we requantize to fewer colors. + # The transparency would be totally off in that case. + del(new.info['transparency']) + if trns is not None: + try: + new.info['transparency'] = new.palette.getcolor(trns) + except: + # if we can't make a transparent color, don't leave the old + # transparency hanging around to mess us up. + del(new.info['transparency']) + warnings.warn("Couldn't allocate palette entry " + + "for transparency") + return new + + # colorspace conversion + if dither is None: + dither = FLOYDSTEINBERG + + try: + im = self.im.convert(mode, dither) + except ValueError: + try: + # normalize source image and try again + im = self.im.convert(getmodebase(self.mode)) + im = im.convert(mode, dither) + except KeyError: + raise ValueError("illegal conversion") + + new_im = self._new(im) + if delete_trns: + # crash fail if we leave a bytes transparency in an rgb/l mode. + del(new_im.info['transparency']) + if trns is not None: + if new_im.mode == 'P': + try: + new_im.info['transparency'] = new_im.palette.getcolor(trns) + except: + del(new_im.info['transparency']) + warnings.warn("Couldn't allocate palette entry " + + "for transparency") + else: + new_im.info['transparency'] = trns + return new_im + + def quantize(self, colors=256, method=None, kmeans=0, palette=None): + """ + Convert the image to 'P' mode with the specified number + of colors. + + :param colors: The desired number of colors, <= 256 + :param method: 0 = median cut + 1 = maximum coverage + 2 = fast octree + 3 = libimagequant + :param kmeans: Integer + :param palette: Quantize to the palette of given :py:class:`PIL.Image.Image`. + :returns: A new image + + """ + + self.load() + + if method is None: + # defaults: + method = 0 + if self.mode == 'RGBA': + method = 2 + + if self.mode == 'RGBA' and method not in (2, 3): + # Caller specified an invalid mode. + raise ValueError( + 'Fast Octree (method == 2) and libimagequant (method == 3) ' + + 'are the only valid methods for quantizing RGBA images') + + if palette: + # use palette from reference image + palette.load() + if palette.mode != "P": + raise ValueError("bad mode for palette image") + if self.mode != "RGB" and self.mode != "L": + raise ValueError( + "only RGB or L mode images can be quantized to a palette" + ) + im = self.im.convert("P", 1, palette.im) + return self._new(im) + + return self._new(self.im.quantize(colors, method, kmeans)) + + def copy(self): + """ + Copies this image. 
Use this method if you wish to paste things + into an image, but still retain the original. + + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + self.load() + return self._new(self.im.copy()) + + __copy__ = copy + + def crop(self, box=None): + """ + Returns a rectangular region from this image. The box is a + 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. + + Note: Prior to Pillow 3.4.0, this was a lazy operation. + + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if box is None: + return self.copy() + + self.load() + return self._new(self._crop(self.im, box)) + + def _crop(self, im, box): + """ + Returns a rectangular region from the core image object im. + + This is equivalent to calling im.crop((x0, y0, x1, y1)), but + includes additional sanity checks. + + :param im: a core image object + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :returns: A core image object. + """ + + x0, y0, x1, y1 = map(int, map(round, box)) + + if x1 < x0: + x1 = x0 + if y1 < y0: + y1 = y0 + + _decompression_bomb_check((x1, y1)) + + return im.crop((x0, y0, x1, y1)) + + def draft(self, mode, size): + """ + Configures the image file loader so it returns a version of the + image that as closely as possible matches the given mode and + size. For example, you can use this method to convert a color + JPEG to greyscale while loading it, or to extract a 128x192 + version from a PCD file. + + Note that this method modifies the :py:class:`~PIL.Image.Image` object + in place. If the image has already been loaded, this method has no + effect. + + Note: This method is not implemented for most images. It is + currently implemented only for JPEG and PCD images. + + :param mode: The requested mode. + :param size: The requested size. + """ + pass + + def _expand(self, xmargin, ymargin=None): + if ymargin is None: + ymargin = xmargin + self.load() + return self._new(self.im.expand(xmargin, ymargin, 0)) + + def filter(self, filter): + """ + Filters this image using the given filter. For a list of + available filters, see the :py:mod:`~PIL.ImageFilter` module. + + :param filter: Filter kernel. + :returns: An :py:class:`~PIL.Image.Image` object. """ + + from . import ImageFilter + + self.load() + + if isinstance(filter, Callable): + filter = filter() + if not hasattr(filter, "filter"): + raise TypeError("filter argument should be ImageFilter.Filter " + + "instance or class") + + multiband = isinstance(filter, ImageFilter.MultibandFilter) + if self.im.bands == 1 or multiband: + return self._new(filter.filter(self.im)) + + ims = [] + for c in range(self.im.bands): + ims.append(self._new(filter.filter(self.im.getband(c)))) + return merge(self.mode, ims) + + def getbands(self): + """ + Returns a tuple containing the name of each band in this image. + For example, **getbands** on an RGB image returns ("R", "G", "B"). + + :returns: A tuple containing band names. + :rtype: tuple + """ + return ImageMode.getmode(self.mode).bands + + def getbbox(self): + """ + Calculates the bounding box of the non-zero regions in the + image. + + :returns: The bounding box is returned as a 4-tuple defining the + left, upper, right, and lower pixel coordinate. See + :ref:`coordinate-system`. If the image is completely empty, this + method returns None. 
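        For example (illustrative), ``Image.new("RGB", (64, 64)).getbbox()``
        returns ``None`` because every pixel is zero, while an image with any
        non-black content returns the rectangle enclosing it.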
+ + """ + + self.load() + return self.im.getbbox() + + def getcolors(self, maxcolors=256): + """ + Returns a list of colors used in this image. + + :param maxcolors: Maximum number of colors. If this number is + exceeded, this method returns None. The default limit is + 256 colors. + :returns: An unsorted list of (count, pixel) values. + """ + + self.load() + if self.mode in ("1", "L", "P"): + h = self.im.histogram() + out = [] + for i in range(256): + if h[i]: + out.append((h[i], i)) + if len(out) > maxcolors: + return None + return out + return self.im.getcolors(maxcolors) + + def getdata(self, band=None): + """ + Returns the contents of this image as a sequence object + containing pixel values. The sequence object is flattened, so + that values for line one follow directly after the values of + line zero, and so on. + + Note that the sequence object returned by this method is an + internal PIL data type, which only supports certain sequence + operations. To convert it to an ordinary sequence (e.g. for + printing), use **list(im.getdata())**. + + :param band: What band to return. The default is to return + all bands. To return a single band, pass in the index + value (e.g. 0 to get the "R" band from an "RGB" image). + :returns: A sequence-like object. + """ + + self.load() + if band is not None: + return self.im.getband(band) + return self.im # could be abused + + def getextrema(self): + """ + Gets the the minimum and maximum pixel values for each band in + the image. + + :returns: For a single-band image, a 2-tuple containing the + minimum and maximum pixel value. For a multi-band image, + a tuple containing one 2-tuple for each band. + """ + + self.load() + if self.im.bands > 1: + extrema = [] + for i in range(self.im.bands): + extrema.append(self.im.getband(i).getextrema()) + return tuple(extrema) + return self.im.getextrema() + + def getim(self): + """ + Returns a capsule that points to the internal image memory. + + :returns: A capsule object. + """ + + self.load() + return self.im.ptr + + def getpalette(self): + """ + Returns the image palette as a list. + + :returns: A list of color values [r, g, b, ...], or None if the + image has no palette. + """ + + self.load() + try: + if py3: + return list(self.im.getpalette()) + else: + return [i8(c) for c in self.im.getpalette()] + except ValueError: + return None # no palette + + def getpixel(self, xy): + """ + Returns the pixel value at a given position. + + :param xy: The coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: The pixel value. If the image is a multi-layer image, + this method returns a tuple. + """ + + self.load() + if self.pyaccess: + return self.pyaccess.getpixel(xy) + return self.im.getpixel(xy) + + def getprojection(self): + """ + Get projection to x and y axes + + :returns: Two sequences, indicating where there are non-zero + pixels along the X-axis and the Y-axis, respectively. + """ + + self.load() + x, y = self.im.getprojection() + return [i8(c) for c in x], [i8(c) for c in y] + + def histogram(self, mask=None, extrema=None): + """ + Returns a histogram for the image. The histogram is returned as + a list of pixel counts, one for each pixel value in the source + image. If the image has more than one band, the histograms for + all bands are concatenated (for example, the histogram for an + "RGB" image contains 768 values). + + A bilevel image (mode "1") is treated as a greyscale ("L") image + by this method. 
+ + If a mask is provided, the method returns a histogram for those + parts of the image where the mask image is non-zero. The mask + image must have the same size as the image, and be either a + bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. + :returns: A list containing pixel counts. + """ + self.load() + if mask: + mask.load() + return self.im.histogram((0, 0), mask.im) + if self.mode in ("I", "F"): + if extrema is None: + extrema = self.getextrema() + return self.im.histogram(extrema) + return self.im.histogram() + + def offset(self, xoffset, yoffset=None): + raise NotImplementedError("offset() has been removed. " + "Please call ImageChops.offset() instead.") + + def paste(self, im, box=None, mask=None): + """ + Pastes another image into this image. The box argument is either + a 2-tuple giving the upper left corner, a 4-tuple defining the + left, upper, right, and lower pixel coordinate, or None (same as + (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size + of the pasted image must match the size of the region. + + If the modes don't match, the pasted image is converted to the mode of + this image (see the :py:meth:`~PIL.Image.Image.convert` method for + details). + + Instead of an image, the source can be a integer or tuple + containing pixel values. The method then fills the region + with the given color. When creating RGB images, you can + also use color strings as supported by the ImageColor module. + + If a mask is given, this method updates only the regions + indicated by the mask. You can use either "1", "L" or "RGBA" + images (in the latter case, the alpha band is used as mask). + Where the mask is 255, the given image is copied as is. Where + the mask is 0, the current value is preserved. Intermediate + values will mix the two images together, including their alpha + channels if they have them. + + See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to + combine images with respect to their alpha channels. + + :param im: Source image or pixel value (integer or tuple). + :param box: An optional 4-tuple giving the region to paste into. + If a 2-tuple is used instead, it's treated as the upper left + corner. If omitted or None, the source is pasted into the + upper left corner. + + If an image is given as the second argument and there is no + third, the box defaults to (0, 0), and the second argument + is interpreted as a mask image. + :param mask: An optional mask image. + """ + + if isImageType(box) and mask is None: + # abbreviated paste(im, mask) syntax + mask = box + box = None + + if box is None: + box = (0, 0) + + if len(box) == 2: + # upper left corner given; get size from image or mask + if isImageType(im): + size = im.size + elif isImageType(mask): + size = mask.size + else: + # FIXME: use self.size here? + raise ValueError( + "cannot determine region size; use 4-item box" + ) + box += (box[0]+size[0], box[1]+size[1]) + + if isStringType(im): + from . import ImageColor + im = ImageColor.getcolor(im, self.mode) + + elif isImageType(im): + im.load() + if self.mode != im.mode: + if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"): + # should use an adapter for this! + im = im.convert(self.mode) + im = im.im + + self._ensure_mutable() + + if mask: + mask.load() + self.im.paste(im, box, mask.im) + else: + self.im.paste(im, box) + + def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): + """ 'In-place' analog of Image.alpha_composite. Composites an image + onto this image. 
+ + :param im: image to composite over this one + :param dest: Optional 2 tuple (left, top) specifying the upper + left corner in this (destination) image. + :param source: Optional 2 (left, top) tuple for the upper left + corner in the overlay source image, or 4 tuple (left, top, right, + bottom) for the bounds of the source rectangle + + Performance Note: Not currently implemented in-place in the core layer. + """ + + if not isinstance(source, (list, tuple)): + raise ValueError("Source must be a tuple") + if not isinstance(dest, (list, tuple)): + raise ValueError("Destination must be a tuple") + if not len(source) in (2, 4): + raise ValueError("Source must be a 2 or 4-tuple") + if not len(dest) == 2: + raise ValueError("Destination must be a 2-tuple") + if min(source) < 0: + raise ValueError("Source must be non-negative") + if min(dest) < 0: + raise ValueError("Destination must be non-negative") + + if len(source) == 2: + source = source + im.size + + # over image, crop if it's not the whole thing. + if source == (0, 0) + im.size: + overlay = im + else: + overlay = im.crop(source) + + # target for the paste + box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) + + # destination image. don't copy if we're using the whole image. + if box == (0, 0) + self.size: + background = self + else: + background = self.crop(box) + + result = alpha_composite(background, overlay) + self.paste(result, box) + + def point(self, lut, mode=None): + """ + Maps this image through a lookup table or function. + + :param lut: A lookup table, containing 256 (or 65536 if + self.mode=="I" and mode == "L") values per band in the + image. A function can be used instead, it should take a + single argument. The function is called once for each + possible pixel value, and the resulting table is applied to + all bands of the image. + :param mode: Output mode (default is same as input). In the + current version, this can only be used if the source image + has mode "L" or "P", and the output has mode "1" or the + source image mode is "I" and the output mode is "L". + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + self.load() + + if isinstance(lut, ImagePointHandler): + return lut.point(self) + + if callable(lut): + # if it isn't a list, it should be a function + if self.mode in ("I", "I;16", "F"): + # check if the function can be used with point_transform + # UNDONE wiredfool -- I think this prevents us from ever doing + # a gamma function point transform on > 8bit images. + scale, offset = _getscaleoffset(lut) + return self._new(self.im.point_transform(scale, offset)) + # for other modes, convert the function to a table + lut = [lut(i) for i in range(256)] * self.im.bands + + if self.mode == "F": + # FIXME: _imaging returns a confusing error message for this case + raise ValueError("point operation not supported for this mode") + + return self._new(self.im.point(lut, mode)) + + def putalpha(self, alpha): + """ + Adds or replaces the alpha layer in this image. If the image + does not have an alpha layer, it's converted to "LA" or "RGBA". + The new layer must be either "L" or "1". + + :param alpha: The new alpha layer. This can either be an "L" or "1" + image having the same size as this image, or an integer or + other color value. 
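
        For example (illustrative), ``im.putalpha(128)`` converts an "RGB"
        image to "RGBA" with a uniform, half-transparent alpha band.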
+ """ + + self._ensure_mutable() + + if self.mode not in ("LA", "RGBA"): + # attempt to promote self to a matching alpha mode + try: + mode = getmodebase(self.mode) + "A" + try: + self.im.setmode(mode) + except (AttributeError, ValueError): + # do things the hard way + im = self.im.convert(mode) + if im.mode not in ("LA", "RGBA"): + raise ValueError # sanity check + self.im = im + self.pyaccess = None + self.mode = self.im.mode + except (KeyError, ValueError): + raise ValueError("illegal image mode") + + if self.mode == "LA": + band = 1 + else: + band = 3 + + if isImageType(alpha): + # alpha layer + if alpha.mode not in ("1", "L"): + raise ValueError("illegal image mode") + alpha.load() + if alpha.mode == "1": + alpha = alpha.convert("L") + else: + # constant alpha + try: + self.im.fillband(band, alpha) + except (AttributeError, ValueError): + # do things the hard way + alpha = new("L", self.size, alpha) + else: + return + + self.im.putband(alpha.im, band) + + def putdata(self, data, scale=1.0, offset=0.0): + """ + Copies pixel data to this image. This method copies data from a + sequence object into the image, starting at the upper left + corner (0, 0), and continuing until either the image or the + sequence ends. The scale and offset values are used to adjust + the sequence values: **pixel = value*scale + offset**. + + :param data: A sequence object. + :param scale: An optional scale value. The default is 1.0. + :param offset: An optional offset value. The default is 0.0. + """ + + self._ensure_mutable() + + self.im.putdata(data, scale, offset) + + def putpalette(self, data, rawmode="RGB"): + """ + Attaches a palette to this image. The image must be a "P" or + "L" image, and the palette sequence must contain 768 integer + values, where each group of three values represent the red, + green, and blue values for the corresponding pixel + index. Instead of an integer sequence, you can use an 8-bit + string. + + :param data: A palette sequence (either a list or a string). + :param rawmode: The raw mode of the palette. + """ + from . import ImagePalette + + if self.mode not in ("L", "P"): + raise ValueError("illegal image mode") + self.load() + if isinstance(data, ImagePalette.ImagePalette): + palette = ImagePalette.raw(data.rawmode, data.palette) + else: + if not isinstance(data, bytes): + if py3: + data = bytes(data) + else: + data = "".join(chr(x) for x in data) + palette = ImagePalette.raw(rawmode, data) + self.mode = "P" + self.palette = palette + self.palette.mode = "RGB" + self.load() # install new palette + + def putpixel(self, xy, value): + """ + Modifies the pixel at the given position. The color is given as + a single numerical value for single-band images, and a tuple for + multi-band images. + + Note that this method is relatively slow. For more extensive changes, + use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` + module instead. + + See: + + * :py:meth:`~PIL.Image.Image.paste` + * :py:meth:`~PIL.Image.Image.putdata` + * :py:mod:`~PIL.ImageDraw` + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param value: The pixel value. + """ + + if self.readonly: + self._copy() + self.load() + + if self.pyaccess: + return self.pyaccess.putpixel(xy, value) + return self.im.putpixel(xy, value) + + def remap_palette(self, dest_map, source_palette=None): + """ + Rewrites the image to reorder the palette. + + :param dest_map: A list of indexes into the original palette. + e.g. 
[1,0] would swap a two item palette, and list(range(255)) + is the identity transform. + :param source_palette: Bytes or None. + :returns: An :py:class:`~PIL.Image.Image` object. + + """ + from . import ImagePalette + + if self.mode not in ("L", "P"): + raise ValueError("illegal image mode") + + if source_palette is None: + if self.mode == "P": + real_source_palette = self.im.getpalette("RGB")[:768] + else: # L-mode + real_source_palette = bytearray(i//3 for i in range(768)) + else: + real_source_palette = source_palette + + palette_bytes = b"" + new_positions = [0]*256 + + # pick only the used colors from the palette + for i, oldPosition in enumerate(dest_map): + palette_bytes += real_source_palette[oldPosition*3:oldPosition*3+3] + new_positions[oldPosition] = i + + # replace the palette color id of all pixel with the new id + + # Palette images are [0..255], mapped through a 1 or 3 + # byte/color map. We need to remap the whole image + # from palette 1 to palette 2. New_positions is + # an array of indexes into palette 1. Palette 2 is + # palette 1 with any holes removed. + + # We're going to leverage the convert mechanism to use the + # C code to remap the image from palette 1 to palette 2, + # by forcing the source image into 'L' mode and adding a + # mapping 'L' mode palette, then converting back to 'L' + # sans palette thus converting the image bytes, then + # assigning the optimized RGB palette. + + # perf reference, 9500x4000 gif, w/~135 colors + # 14 sec prepatch, 1 sec postpatch with optimization forced. + + mapping_palette = bytearray(new_positions) + + m_im = self.copy() + m_im.mode = 'P' + + m_im.palette = ImagePalette.ImagePalette("RGB", + palette=mapping_palette*3, + size=768) + # possibly set palette dirty, then + # m_im.putpalette(mapping_palette, 'L') # converts to 'P' + # or just force it. + # UNDONE -- this is part of the general issue with palettes + m_im.im.putpalette(*m_im.palette.getdata()) + + m_im = m_im.convert('L') + + # Internally, we require 768 bytes for a palette. + new_palette_bytes = (palette_bytes + + (768 - len(palette_bytes)) * b'\x00') + m_im.putpalette(new_palette_bytes) + m_im.palette = ImagePalette.ImagePalette("RGB", + palette=palette_bytes, + size=len(palette_bytes)) + + return m_im + + def resize(self, size, resample=NEAREST, box=None): + """ + Returns a resized copy of this image. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param resample: An optional resampling filter. This can be + one of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BOX`, + :py:attr:`PIL.Image.BILINEAR`, :py:attr:`PIL.Image.HAMMING`, + :py:attr:`PIL.Image.BICUBIC` or :py:attr:`PIL.Image.LANCZOS`. + If omitted, or if the image has mode "1" or "P", it is + set :py:attr:`PIL.Image.NEAREST`. + See: :ref:`concept-filters`. + :param box: An optional 4-tuple of floats giving the region + of the source image which should be scaled. + The values should be within (0, 0, width, height) rectangle. + If omitted or None, the entire source is used. + :returns: An :py:class:`~PIL.Image.Image` object. 
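
        For example (illustrative), ``im.resize((128, 128), resample=BICUBIC,
        box=(0, 0, 64, 64))`` scales only the top-left 64x64 region of the
        source up to 128x128.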
+ """ + + if resample not in ( + NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING, + ): + raise ValueError("unknown resampling filter") + + size = tuple(size) + + if box is None: + box = (0, 0) + self.size + else: + box = tuple(box) + + if self.size == size and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ("1", "P"): + resample = NEAREST + + if self.mode == 'LA': + return self.convert('La').resize(size, resample, box).convert('LA') + + if self.mode == 'RGBA': + return self.convert('RGBa').resize(size, resample, box).convert('RGBA') + + self.load() + + return self._new(self.im.resize(size, resample, box)) + + def rotate(self, angle, resample=NEAREST, expand=0, center=None, + translate=None, fillcolor=None): + """ + Returns a rotated copy of this image. This method returns a + copy of this image, rotated the given number of degrees counter + clockwise around its centre. + + :param angle: In degrees counter clockwise. + :param resample: An optional resampling filter. This can be + one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour), + :py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:attr:`PIL.Image.BICUBIC` + (cubic spline interpolation in a 4x4 environment). + If omitted, or if the image has mode "1" or "P", it is + set :py:attr:`PIL.Image.NEAREST`. See :ref:`concept-filters`. + :param expand: Optional expansion flag. If true, expands the output + image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the + input image. Note that the expand flag assumes rotation around + the center and no translation. + :param center: Optional center of rotation (a 2-tuple). Origin is + the upper left corner. Default is the center of the image. + :param translate: An optional post-rotate translation (a 2-tuple). + :param fillcolor: An optional color for area outside the rotated image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + angle = angle % 360.0 + + # Fast paths regardless of filter, as long as we're not + # translating or changing the center. + if not (center or translate): + if angle == 0: + return self.copy() + if angle == 180: + return self.transpose(ROTATE_180) + if angle == 90 and expand: + return self.transpose(ROTATE_90) + if angle == 270 and expand: + return self.transpose(ROTATE_270) + + # Calculate the affine matrix. Note that this is the reverse + # transformation (from destination image to source) because we + # want to interpolate the (discrete) destination pixel from + # the local area around the (floating) source pixel. + + # The matrix we actually want (note that it operates from the right): + # (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx) + # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy) + # (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1) + + # The reverse matrix is thus: + # (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx) + # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty) + # (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1) + + # In any case, the final translation may be updated at the end to + # compensate for the expand flag. + + w, h = self.size + + if translate is None: + post_trans = (0, 0) + else: + post_trans = translate + if center is None: + rotn_center = (w / 2.0, h / 2.0) # FIXME These should be rounded to ints? 
+ else: + rotn_center = center + + angle = - math.radians(angle) + matrix = [ + round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, + round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0 + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a*x + b*y + c, d*x + e*y + f + + matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], matrix) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + + if expand: + # calculate output size + xx = [] + yy = [] + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, matrix) + xx.append(x) + yy.append(y) + nw = int(math.ceil(max(xx)) - math.floor(min(xx))) + nh = int(math.ceil(max(yy)) - math.floor(min(yy))) + + # We multiply a translation matrix from the right. Because of its + # special form, this is the same as taking the image of the + # translation vector as new translation vector. + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, + -(nh - h) / 2.0, + matrix) + w, h = nw, nh + + return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor) + + def save(self, fp, format=None, **params): + """ + Saves this image under the given filename. If no format is + specified, the format to use is determined from the filename + extension, if possible. + + Keyword options can be used to provide additional instructions + to the writer. If a writer doesn't recognise an option, it is + silently ignored. The available options are described in the + :doc:`image format documentation + <../handbook/image-file-formats>` for each writer. + + You can use a file object instead of a filename. In this case, + you must always specify the format. The file object must + implement the ``seek``, ``tell``, and ``write`` + methods, and be opened in binary mode. + + :param fp: A filename (string), pathlib.Path object or file object. + :param format: Optional format override. If omitted, the + format to use is determined from the filename extension. + If a file object was used instead of a filename, this + parameter should always be used. + :param params: Extra parameters to the image writer. + :returns: None + :exception KeyError: If the output format could not be determined + from the file name. Use the format option to solve this. + :exception IOError: If the file could not be written. The file + may have been created, and may contain partial data. + """ + + filename = "" + open_fp = False + if isPath(fp): + filename = fp + open_fp = True + elif HAS_PATHLIB and isinstance(fp, Path): + filename = str(fp) + open_fp = True + if not filename and hasattr(fp, "name") and isPath(fp.name): + # only set the name for metadata purposes + filename = fp.name + + # may mutate self! + self.load() + + save_all = params.pop('save_all', False) + self.encoderinfo = params + self.encoderconfig = () + + preinit() + + ext = os.path.splitext(filename)[1].lower() + + if not format: + if ext not in EXTENSION: + init() + try: + format = EXTENSION[ext] + except KeyError: + raise ValueError('unknown file extension: {}'.format(ext)) + + if format.upper() not in SAVE: + init() + if save_all: + save_handler = SAVE_ALL[format.upper()] + else: + save_handler = SAVE[format.upper()] + + if open_fp: + if params.get('append', False): + fp = builtins.open(filename, "r+b") + else: + # Open also for reading ("+"), because TIFF save_all + # writer needs to go back and edit the written data. 
+ fp = builtins.open(filename, "w+b") + + try: + save_handler(self, fp, filename) + finally: + # do what we can to clean up + if open_fp: + fp.close() + + def seek(self, frame): + """ + Seeks to the given frame in this sequence file. If you seek + beyond the end of the sequence, the method raises an + **EOFError** exception. When a sequence file is opened, the + library automatically seeks to frame 0. + + Note that in the current version of the library, most sequence + formats only allows you to seek to the next frame. + + See :py:meth:`~PIL.Image.Image.tell`. + + :param frame: Frame number, starting at 0. + :exception EOFError: If the call attempts to seek beyond the end + of the sequence. + """ + + # overridden by file handlers + if frame != 0: + raise EOFError + + def show(self, title=None, command=None): + """ + Displays this image. This method is mainly intended for + debugging purposes. + + On Unix platforms, this method saves the image to a temporary + PPM file, and calls either the **xv** utility or the **display** + utility, depending on which one can be found. + + On macOS, this method saves the image to a temporary BMP file, and + opens it with the native Preview application. + + On Windows, it saves the image to a temporary BMP file, and uses + the standard BMP display utility to show it (usually Paint). + + :param title: Optional title to use for the image window, + where possible. + :param command: command used to show the image + """ + + _show(self, title=title, command=command) + + def split(self): + """ + Split this image into individual bands. This method returns a + tuple of individual image bands from an image. For example, + splitting an "RGB" image creates three new images each + containing a copy of one of the original bands (red, green, + blue). + + If you need only one band, :py:meth:`~PIL.Image.Image.getchannel` + method can be more convenient and faster. + + :returns: A tuple containing bands. + """ + + self.load() + if self.im.bands == 1: + ims = [self.copy()] + else: + ims = map(self._new, self.im.split()) + return tuple(ims) + + def getchannel(self, channel): + """ + Returns an image containing a single channel of the source image. + + :param channel: What channel to return. Could be index + (0 for "R" channel of "RGB") or channel name + ("A" for alpha channel of "RGBA"). + :returns: An image in "L" mode. + + .. versionadded:: 4.3.0 + """ + self.load() + + if isStringType(channel): + try: + channel = self.getbands().index(channel) + except ValueError: + raise ValueError( + 'The image has no channel "{}"'.format(channel)) + + return self._new(self.im.getband(channel)) + + def tell(self): + """ + Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`. + + :returns: Frame number, starting with 0. + """ + return 0 + + def thumbnail(self, size, resample=BICUBIC): + """ + Make this image into a thumbnail. This method modifies the + image to contain a thumbnail version of itself, no larger than + the given size. This method calculates an appropriate thumbnail + size to preserve the aspect of the image, calls the + :py:meth:`~PIL.Image.Image.draft` method to configure the file reader + (where applicable), and finally resizes the image. + + Note that this function modifies the :py:class:`~PIL.Image.Image` + object in place. If you need to use the full resolution image as well, + apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original + image. + + :param size: Requested size. + :param resample: Optional resampling filter. 
This can be one + of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`, + :py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`. + If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`. + (was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0) + :returns: None + """ + + # preserve aspect ratio + x, y = self.size + if x > size[0]: + y = int(max(y * size[0] / x, 1)) + x = int(size[0]) + if y > size[1]: + x = int(max(x * size[1] / y, 1)) + y = int(size[1]) + size = x, y + + if size == self.size: + return + + self.draft(None, size) + + im = self.resize(size, resample) + + self.im = im.im + self.mode = im.mode + self.size = size + + self.readonly = 0 + self.pyaccess = None + + # FIXME: the different transform methods need further explanation + # instead of bloating the method docs, add a separate chapter. + def transform(self, size, method, data=None, resample=NEAREST, + fill=1, fillcolor=None): + """ + Transforms this image. This method creates a new image with the + given size, and the same mode as the original, and copies data + to the new image using the given transform. + + :param size: The output size. + :param method: The transformation method. This is one of + :py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion), + :py:attr:`PIL.Image.AFFINE` (affine transform), + :py:attr:`PIL.Image.PERSPECTIVE` (perspective transform), + :py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or + :py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals + in one operation). + + It may also be an :py:class:`~PIL.Image.ImageTransformHandler` + object:: + class Example(Image.ImageTransformHandler): + def transform(size, method, data, resample, fill=1): + # Return result + + It may also be an object with a :py:meth:`~method.getdata` method + that returns a tuple supplying new **method** and **data** values:: + class Example(object): + def getdata(self): + method = Image.EXTENT + data = (0, 0, 100, 100) + return method, data + :param data: Extra data to the transformation method. + :param resample: Optional resampling filter. It can be one of + :py:attr:`PIL.Image.NEAREST` (use nearest neighbour), + :py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline + interpolation in a 4x4 environment). If omitted, or if the image + has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`. + :param fill: If **method** is an + :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of + the arguments passed to it. Otherwise, it is unused. + :param fillcolor: Optional fill color for the area outside the transform + in the output image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if self.mode == 'LA': + return self.convert('La').transform( + size, method, data, resample, fill, fillcolor).convert('LA') + + if self.mode == 'RGBA': + return self.convert('RGBa').transform( + size, method, data, resample, fill, fillcolor).convert('RGBA') + + if isinstance(method, ImageTransformHandler): + return method.transform(size, self, resample=resample, fill=fill) + + if hasattr(method, "getdata"): + # compatibility w. 
old-style transform objects + method, data = method.getdata() + + if data is None: + raise ValueError("missing method data") + + im = new(self.mode, size, fillcolor) + if method == MESH: + # list of quads + for box, quad in data: + im.__transformer(box, self, QUAD, quad, resample, + fillcolor is None) + else: + im.__transformer((0, 0)+size, self, method, data, + resample, fillcolor is None) + + return im + + def __transformer(self, box, image, method, data, + resample=NEAREST, fill=1): + w = box[2] - box[0] + h = box[3] - box[1] + + if method == AFFINE: + data = data[0:6] + + elif method == EXTENT: + # convert extent to an affine transform + x0, y0, x1, y1 = data + xs = float(x1 - x0) / w + ys = float(y1 - y0) / h + method = AFFINE + data = (xs, 0, x0, 0, ys, y0) + + elif method == PERSPECTIVE: + data = data[0:8] + + elif method == QUAD: + # quadrilateral warp. data specifies the four corners + # given as NW, SW, SE, and NE. + nw = data[0:2] + sw = data[2:4] + se = data[4:6] + ne = data[6:8] + x0, y0 = nw + As = 1.0 / w + At = 1.0 / h + data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At, + (se[0]-sw[0]-ne[0]+x0)*As*At, + y0, (ne[1]-y0)*As, (sw[1]-y0)*At, + (se[1]-sw[1]-ne[1]+y0)*As*At) + + else: + raise ValueError("unknown transformation method") + + if resample not in (NEAREST, BILINEAR, BICUBIC): + raise ValueError("unknown resampling filter") + + image.load() + + self.load() + + if image.mode in ("1", "P"): + resample = NEAREST + + self.im.transform2(box, image.im, method, data, resample, fill) + + def transpose(self, method): + """ + Transpose image (flip or rotate in 90 degree steps) + + :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`, + :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`, + :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`, + :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`. + :returns: Returns a flipped or rotated copy of this image. + """ + + self.load() + return self._new(self.im.transpose(method)) + + def effect_spread(self, distance): + """ + Randomly spread pixels in an image. + + :param distance: Distance to spread pixels. + """ + self.load() + return self._new(self.im.effect_spread(distance)) + + def toqimage(self): + """Returns a QImage copy of this image""" + from . import ImageQt + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqimage(self) + + def toqpixmap(self): + """Returns a QPixmap copy of this image""" + from . import ImageQt + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqpixmap(self) + + +# -------------------------------------------------------------------- +# Abstract handlers. 
+ +class ImagePointHandler(object): + # used as a mixin by point transforms (for use with im.point) + pass + + +class ImageTransformHandler(object): + # used as a mixin by geometry transforms (for use with im.transform) + pass + + +# -------------------------------------------------------------------- +# Factories + +# +# Debugging + +def _wedge(): + """Create greyscale wedge (for debugging only)""" + + return Image()._new(core.wedge("L")) + + +def _check_size(size): + """ + Common check to enforce type and sanity check on size tuples + + :param size: Should be a 2 tuple of (width, height) + :returns: True, or raises a ValueError + """ + + if not isinstance(size, (list, tuple)): + raise ValueError("Size must be a tuple") + if len(size) != 2: + raise ValueError("Size must be a tuple of length 2") + if size[0] < 0 or size[1] < 0: + raise ValueError("Width and height must be >= 0") + + return True + + +def new(mode, size, color=0): + """ + Creates a new image with the given mode and size. + + :param mode: The mode to use for the new image. See: + :ref:`concept-modes`. + :param size: A 2-tuple, containing (width, height) in pixels. + :param color: What color to use for the image. Default is black. + If given, this should be a single integer or floating point value + for single-band modes, and a tuple for multi-band modes (one value + per band). When creating RGB images, you can also use color + strings as supported by the ImageColor module. If the color is + None, the image is not initialised. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + if color is None: + # don't initialize + return Image()._new(core.new(mode, size)) + + if isStringType(color): + # css3-style specifier + + from . import ImageColor + color = ImageColor.getcolor(color, mode) + + return Image()._new(core.fill(mode, size, color)) + + +def frombytes(mode, size, data, decoder_name="raw", *args): + """ + Creates a copy of an image memory from pixel data in a buffer. + + In its simplest form, this function takes three arguments + (mode, size, and unpacked pixel data). + + You can also use any pixel decoder supported by PIL. For more + information on available decoders, see the section + :ref:`Writing Your Own File Decoder `. + + Note that this function decodes pixel data only, not entire images. + If you have an entire image in a string, wrap it in a + :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load + it. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A byte buffer containing raw data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw" and args == (): + args = mode + + im = new(mode, size) + im.frombytes(data, decoder_name, args) + return im + + +def fromstring(*args, **kw): + raise NotImplementedError("fromstring() has been removed. " + + "Please call frombytes() instead.") + + +def frombuffer(mode, size, data, decoder_name="raw", *args): + """ + Creates an image memory referencing pixel data in a byte buffer. + + This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data + in the byte buffer, where possible. This means that changes to the + original buffer object are reflected in this image). 
Not all modes can + share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK". + + Note that this function decodes pixel data only, not entire images. + If you have an entire image file in a string, wrap it in a + **BytesIO** object, and use :py:func:`~PIL.Image.open` to load it. + + In the current version, the default parameters used for the "raw" decoder + differs from that used for :py:func:`~PIL.Image.frombytes`. This is a + bug, and will probably be fixed in a future release. The current release + issues a warning if you do this; to disable the warning, you should provide + the full set of parameters. See below for details. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A bytes or other buffer object containing raw + data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. For the + default encoder ("raw"), it's recommended that you provide the + full set of parameters:: + + frombuffer(mode, size, data, "raw", mode, 0, 1) + + :returns: An :py:class:`~PIL.Image.Image` object. + + .. versionadded:: 1.1.4 + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw": + if args == (): + warnings.warn( + "the frombuffer defaults may change in a future release; " + "for portability, change the call to read:\n" + " frombuffer(mode, size, data, 'raw', mode, 0, 1)", + RuntimeWarning, stacklevel=2 + ) + args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6 + if args[0] in _MAPMODES: + im = new(mode, (1, 1)) + im = im._new( + core.map_buffer(data, size, decoder_name, None, 0, args) + ) + im.readonly = 1 + return im + + return frombytes(mode, size, data, decoder_name, args) + + +def fromarray(obj, mode=None): + """ + Creates an image memory from an object exporting the array interface + (using the buffer protocol). + + If obj is not contiguous, then the tobytes method is called + and :py:func:`~PIL.Image.frombuffer` is used. + + :param obj: Object with array interface + :param mode: Mode to use (will be determined from type if None) + See: :ref:`concept-modes`. + :returns: An image object. + + .. versionadded:: 1.1.6 + """ + arr = obj.__array_interface__ + shape = arr['shape'] + ndim = len(shape) + strides = arr.get('strides', None) + if mode is None: + try: + typekey = (1, 1) + shape[2:], arr['typestr'] + mode, rawmode = _fromarray_typemap[typekey] + except KeyError: + # print(typekey) + raise TypeError("Cannot handle this data type") + else: + rawmode = mode + if mode in ["1", "L", "I", "P", "F"]: + ndmax = 2 + elif mode == "RGB": + ndmax = 3 + else: + ndmax = 4 + if ndim > ndmax: + raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax)) + + size = shape[1], shape[0] + if strides is not None: + if hasattr(obj, 'tobytes'): + obj = obj.tobytes() + else: + obj = obj.tostring() + + return frombuffer(mode, size, obj, "raw", rawmode, 0, 1) + + +def fromqimage(im): + """Creates an image instance from a QImage image""" + from . import ImageQt + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqimage(im) + + +def fromqpixmap(im): + """Creates an image instance from a QPixmap image""" + from . 
import ImageQt + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqpixmap(im) + + +_fromarray_typemap = { + # (shape, typestr) => mode, rawmode + # first two members of shape are set to one + ((1, 1), "|b1"): ("1", "1;8"), + ((1, 1), "|u1"): ("L", "L"), + ((1, 1), "|i1"): ("I", "I;8"), + ((1, 1), "u2"): ("I", "I;16B"), + ((1, 1), "i2"): ("I", "I;16BS"), + ((1, 1), "u4"): ("I", "I;32B"), + ((1, 1), "i4"): ("I", "I;32BS"), + ((1, 1), "f4"): ("F", "F;32BF"), + ((1, 1), "f8"): ("F", "F;64BF"), + ((1, 1, 2), "|u1"): ("LA", "LA"), + ((1, 1, 3), "|u1"): ("RGB", "RGB"), + ((1, 1, 4), "|u1"): ("RGBA", "RGBA"), + } + +# shortcuts +_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I") +_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F") + + +def _decompression_bomb_check(size): + if MAX_IMAGE_PIXELS is None: + return + + pixels = size[0] * size[1] + + if pixels > 2 * MAX_IMAGE_PIXELS: + raise DecompressionBombError( + "Image size (%d pixels) exceeds limit of %d pixels, " + "could be decompression bomb DOS attack." % + (pixels, 2 * MAX_IMAGE_PIXELS)) + + if pixels > MAX_IMAGE_PIXELS: + warnings.warn( + "Image size (%d pixels) exceeds limit of %d pixels, " + "could be decompression bomb DOS attack." % + (pixels, MAX_IMAGE_PIXELS), + DecompressionBombWarning) + + +def open(fp, mode="r"): + """ + Opens and identifies the given image file. + + This is a lazy operation; this function identifies the file, but + the file remains open and the actual image data is not read from + the file until you try to process the data (or call the + :py:meth:`~PIL.Image.Image.load` method). See + :py:func:`~PIL.Image.new`. See :ref:`file-handling`. + + :param fp: A filename (string), pathlib.Path object or a file object. + The file object must implement :py:meth:`~file.read`, + :py:meth:`~file.seek`, and :py:meth:`~file.tell` methods, + and be opened in binary mode. + :param mode: The mode. If given, this argument must be "r". + :returns: An :py:class:`~PIL.Image.Image` object. + :exception IOError: If the file cannot be found, or the image cannot be + opened and identified. + """ + + if mode != "r": + raise ValueError("bad mode %r" % mode) + + exclusive_fp = False + filename = "" + if isPath(fp): + filename = fp + elif HAS_PATHLIB and isinstance(fp, Path): + filename = str(fp.resolve()) + + if filename: + fp = builtins.open(filename, "rb") + exclusive_fp = True + + try: + fp.seek(0) + except (AttributeError, io.UnsupportedOperation): + fp = io.BytesIO(fp.read()) + exclusive_fp = True + + prefix = fp.read(16) + + preinit() + + def _open_core(fp, filename, prefix): + for i in ID: + try: + factory, accept = OPEN[i] + if not accept or accept(prefix): + fp.seek(0) + im = factory(fp, filename) + _decompression_bomb_check(im.size) + return im + except (SyntaxError, IndexError, TypeError, struct.error): + # Leave disabled by default, spams the logs with image + # opening failures that are entirely expected. + # logger.debug("", exc_info=True) + continue + return None + + im = _open_core(fp, filename, prefix) + + if im is None: + if init(): + im = _open_core(fp, filename, prefix) + + if im: + im._exclusive_fp = exclusive_fp + return im + + if exclusive_fp: + fp.close() + raise IOError("cannot identify image file %r" + % (filename if filename else fp)) + +# +# Image processing. + + +def alpha_composite(im1, im2): + """ + Alpha composite im2 over im1. + + :param im1: The first image. Must have mode RGBA. + :param im2: The second image. 
Must have mode RGBA, and the same size as + the first image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.alpha_composite(im1.im, im2.im)) + + +def blend(im1, im2, alpha): + """ + Creates a new image by interpolating between two input images, using + a constant alpha.:: + + out = image1 * (1.0 - alpha) + image2 * alpha + + :param im1: The first image. + :param im2: The second image. Must have the same mode and size as + the first image. + :param alpha: The interpolation alpha factor. If alpha is 0.0, a + copy of the first image is returned. If alpha is 1.0, a copy of + the second image is returned. There are no restrictions on the + alpha value. If necessary, the result is clipped to fit into + the allowed output range. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.blend(im1.im, im2.im, alpha)) + + +def composite(image1, image2, mask): + """ + Create composite image by blending images using a transparency mask. + + :param image1: The first image. + :param image2: The second image. Must have the same mode and + size as the first image. + :param mask: A mask image. This image can have mode + "1", "L", or "RGBA", and must have the same size as the + other two images. + """ + + image = image2.copy() + image.paste(image1, None, mask) + return image + + +def eval(image, *args): + """ + Applies the function (which should take one argument) to each pixel + in the given image. If the image has more than one band, the same + function is applied to each band. Note that the function is + evaluated once for each possible pixel value, so you cannot use + random components or other generators. + + :param image: The input image. + :param function: A function object, taking one integer argument. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + return image.point(args[0]) + + +def merge(mode, bands): + """ + Merge a set of single band images into a new multiband image. + + :param mode: The mode to use for the output image. See: + :ref:`concept-modes`. + :param bands: A sequence containing one single-band image for + each band in the output image. All bands must have the + same size. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if getmodebands(mode) != len(bands) or "*" in mode: + raise ValueError("wrong number of bands") + for band in bands[1:]: + if band.mode != getmodetype(mode): + raise ValueError("mode mismatch") + if band.size != bands[0].size: + raise ValueError("size mismatch") + for band in bands: + band.load() + return bands[0]._new(core.merge(mode, *[b.im for b in bands])) + + +# -------------------------------------------------------------------- +# Plugin registry + +def register_open(id, factory, accept=None): + """ + Register an image file plugin. This function should not be used + in application code. + + :param id: An image format identifier. + :param factory: An image file factory method. + :param accept: An optional function that can be used to quickly + reject images having another format. + """ + id = id.upper() + ID.append(id) + OPEN[id] = factory, accept + + +def register_mime(id, mimetype): + """ + Registers an image MIME type. This function should not be used + in application code. + + :param id: An image format identifier. + :param mimetype: The image MIME type for this format. + """ + MIME[id.upper()] = mimetype + + +def register_save(id, driver): + """ + Registers an image save function. 
This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE[id.upper()] = driver + + +def register_save_all(id, driver): + """ + Registers an image function to save all the frames + of a multiframe format. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE_ALL[id.upper()] = driver + + +def register_extension(id, extension): + """ + Registers an image extension. This function should not be + used in application code. + + :param id: An image format identifier. + :param extension: An extension used for this format. + """ + EXTENSION[extension.lower()] = id.upper() + + +def register_extensions(id, extensions): + """ + Registers image extensions. This function should not be + used in application code. + + :param id: An image format identifier. + :param extensions: A list of extensions used for this format. + """ + for extension in extensions: + register_extension(id, extension) + + +def registered_extensions(): + """ + Returns a dictionary containing all file extensions belonging + to registered plugins + """ + if not EXTENSION: + init() + return EXTENSION + + +def register_decoder(name, decoder): + """ + Registers an image decoder. This function should not be + used in application code. + + :param name: The name of the decoder + :param decoder: A callable(mode, args) that returns an + ImageFile.PyDecoder object + + .. versionadded:: 4.1.0 + """ + DECODERS[name] = decoder + + +def register_encoder(name, encoder): + """ + Registers an image encoder. This function should not be + used in application code. + + :param name: The name of the encoder + :param encoder: A callable(mode, args) that returns an + ImageFile.PyEncoder object + + .. versionadded:: 4.1.0 + """ + ENCODERS[name] = encoder + + +# -------------------------------------------------------------------- +# Simple display support. User code may override this. + +def _show(image, **options): + # override me, as necessary + _showxv(image, **options) + + +def _showxv(image, title=None, **options): + from . import ImageShow + ImageShow.show(image, title, **options) + + +# -------------------------------------------------------------------- +# Effects + +def effect_mandelbrot(size, extent, quality): + """ + Generate a Mandelbrot set covering the given extent. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param extent: The extent to cover, as a 4-tuple: + (x0, y0, x1, y2). + :param quality: Quality. + """ + return Image()._new(core.effect_mandelbrot(size, extent, quality)) + + +def effect_noise(size, sigma): + """ + Generate Gaussian noise centered around 128. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param sigma: Standard deviation of noise. + """ + return Image()._new(core.effect_noise(size, sigma)) + + +def linear_gradient(mode): + """ + Generate 256x256 linear gradient from black to white, top to bottom. + + :param mode: Input mode. + """ + return Image()._new(core.linear_gradient(mode)) + + +def radial_gradient(mode): + """ + Generate 256x256 radial gradient from black to white, centre to edge. + + :param mode: Input mode. 
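+
+    A short usage sketch (illustrative only; the output path is
+    hypothetical)::
+
+        from PIL import Image
+
+        grad = Image.radial_gradient("L")  # 256x256 greyscale gradient
+        grad.save("radial.png")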
+ """ + return Image()._new(core.radial_gradient(mode)) + + +# -------------------------------------------------------------------- +# Resources + +def _apply_env_variables(env=None): + if env is None: + env = os.environ + + for var_name, setter in [ + ('PILLOW_ALIGNMENT', core.set_alignment), + ('PILLOW_BLOCK_SIZE', core.set_block_size), + ('PILLOW_BLOCKS_MAX', core.set_blocks_max), + ]: + if var_name not in env: + continue + + var = env[var_name].lower() + + units = 1 + for postfix, mul in [('k', 1024), ('m', 1024*1024)]: + if var.endswith(postfix): + units = mul + var = var[:-len(postfix)] + + try: + var = int(var) * units + except ValueError: + warnings.warn("{0} is not int".format(var_name)) + continue + + try: + setter(var) + except ValueError as e: + warnings.warn("{0}: {1}".format(var_name, e)) + + +_apply_env_variables() +atexit.register(core.clear_cache) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.pyc new file mode 100644 index 0000000000000000000000000000000000000000..600fb75e2a4d3454f8fb0324a409dede0548f426 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Image.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.py new file mode 100644 index 0000000000000000000000000000000000000000..89016730e36c5e3f97d1a945f20625854b95a14c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.py @@ -0,0 +1,283 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard channel operations +# +# History: +# 1996-03-24 fl Created +# 1996-08-13 fl Added logical operations (for "1" images) +# 2000-10-12 fl Added offset method (from Image.py) +# +# Copyright (c) 1997-2000 by Secret Labs AB +# Copyright (c) 1996-2000 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +def constant(image, value): + """Fill a channel with a given grey level. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.new("L", image.size, value) + + +def duplicate(image): + """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return image.copy() + + +def invert(image): + """ + Invert an image (channel). + + .. code-block:: python + + out = MAX - image + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image.load() + return image._new(image.im.chop_invert()) + + +def lighter(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image containing + the lighter values. + + .. code-block:: python + + out = max(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_lighter(image2.im)) + + +def darker(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image + containing the darker values. + + .. code-block:: python + + out = min(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_darker(image2.im)) + + +def difference(image1, image2): + """ + Returns the absolute value of the pixel-by-pixel difference between the two + images. + + .. 
code-block:: python + + out = abs(image1 - image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_difference(image2.im)) + + +def multiply(image1, image2): + """ + Superimposes two images on top of each other. + + If you multiply an image with a solid black image, the result is black. If + you multiply with a solid white image, the image is unaffected. + + .. code-block:: python + + out = image1 * image2 / MAX + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_multiply(image2.im)) + + +def screen(image1, image2): + """ + Superimposes two inverted images on top of each other. + + .. code-block:: python + + out = MAX - ((MAX - image1) * (MAX - image2) / MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_screen(image2.im)) + + +def add(image1, image2, scale=1.0, offset=0): + """ + Adds two images, dividing the result by scale and adding the + offset. If omitted, scale defaults to 1.0, and offset to 0.0. + + .. code-block:: python + + out = ((image1 + image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add(image2.im, scale, offset)) + + +def subtract(image1, image2, scale=1.0, offset=0): + """ + Subtracts two images, dividing the result by scale and adding the + offset. If omitted, scale defaults to 1.0, and offset to 0.0. + + .. code-block:: python + + out = ((image1 - image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract(image2.im, scale, offset)) + + +def add_modulo(image1, image2): + """Add two images, without clipping the result. + + .. code-block:: python + + out = ((image1 + image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add_modulo(image2.im)) + + +def subtract_modulo(image1, image2): + """Subtract two images, without clipping the result. + + .. code-block:: python + + out = ((image1 - image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract_modulo(image2.im)) + + +def logical_and(image1, image2): + """Logical AND between two images. + + .. code-block:: python + + out = ((image1 and image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_and(image2.im)) + + +def logical_or(image1, image2): + """Logical OR between two images. + + .. code-block:: python + + out = ((image1 or image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_or(image2.im)) + + +def logical_xor(image1, image2): + """Logical XOR between two images. + + .. code-block:: python + + out = ((bool(image1) != bool(image2)) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_xor(image2.im)) + + +def blend(image1, image2, alpha): + """Blend images using constant transparency weight. Alias for + :py:meth:`PIL.Image.Image.blend`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.blend(image1, image2, alpha) + + +def composite(image1, image2, mask): + """Create composite using transparency mask. 
Alias for + :py:meth:`PIL.Image.Image.composite`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.composite(image1, image2, mask) + + +def offset(image, xoffset, yoffset=None): + """Returns a copy of the image where data has been offset by the given + distances. Data wraps around the edges. If **yoffset** is omitted, it + is assumed to be equal to **xoffset**. + + :param xoffset: The horizontal distance. + :param yoffset: The vertical distance. If omitted, both + distances are set to the same value. + :rtype: :py:class:`~PIL.Image.Image` + """ + + if yoffset is None: + yoffset = xoffset + image.load() + return image._new(image.im.offset(xoffset, yoffset)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.pyc new file mode 100644 index 0000000000000000000000000000000000000000..862f41c54a16bbb3bc658fc8e9fcca02fa3b1058 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageChops.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.py new file mode 100644 index 0000000000000000000000000000000000000000..d82e30efc462a4c0afcf0788f9380423cb2d7917 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.py @@ -0,0 +1,955 @@ +# The Python Imaging Library. +# $Id$ + +# Optional color management support, based on Kevin Cazabon's PyCMS +# library. + +# History: + +# 2009-03-08 fl Added to PIL. + +# Copyright (C) 2002-2003 Kevin Cazabon +# Copyright (c) 2009 by Fredrik Lundh +# Copyright (c) 2013 by Eric Soroos + +# See the README file for information on usage and redistribution. See +# below for the original description. + +from __future__ import print_function +import sys + +from PIL import Image +try: + from PIL import _imagingcms +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from _util import deferred_error + _imagingcms = deferred_error(ex) +from PIL._util import isStringType + +DESCRIPTION = """ +pyCMS + + a Python / PIL interface to the littleCMS ICC Color Management System + Copyright (C) 2002-2003 Kevin Cazabon + kevin@cazabon.com + http://www.cazabon.com + + pyCMS home page: http://www.cazabon.com/pyCMS + littleCMS home page: http://www.littlecms.com + (littleCMS is Copyright (C) 1998-2001 Marti Maria) + + Originally released under LGPL. Graciously donated to PIL in + March 2009, for distribution under the standard PIL license + + The pyCMS.py module provides a "clean" interface between Python/PIL and + pyCMSdll, taking care of some of the more complex handling of the direct + pyCMSdll functions, as well as error-checking and making sure that all + relevant data is kept together. + + While it is possible to call pyCMSdll functions directly, it's not highly + recommended. + + Version History: + + 1.0.0 pil Oct 2013 Port to LCMS 2. + + 0.1.0 pil mod March 10, 2009 + + Renamed display profile to proof profile. The proof + profile is the profile of the device that is being + simulated, not the profile of the device which is + actually used to display/print the final simulation + (that'd be the output profile) - also see LCMSAPI.txt + input colorspace -> using 'renderingIntent' -> proof + colorspace -> using 'proofRenderingIntent' -> output + colorspace + + Added LCMS FLAGS support. 
+ Added FLAGS["SOFTPROOFING"] as default flag for + buildProofTransform (otherwise the proof profile/intent + would be ignored). + + 0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms + + 0.0.2 alpha Jan 6, 2002 + + Added try/except statements around type() checks of + potential CObjects... Python won't let you use type() + on them, and raises a TypeError (stupid, if you ask + me!) + + Added buildProofTransformFromOpenProfiles() function. + Additional fixes in DLL, see DLL code for details. + + 0.0.1 alpha first public release, Dec. 26, 2002 + + Known to-do list with current version (of Python interface, not pyCMSdll): + + none + +""" + +VERSION = "1.0.0 pil" + +# --------------------------------------------------------------------. + +core = _imagingcms + +# +# intent/direction values + +INTENT_PERCEPTUAL = 0 +INTENT_RELATIVE_COLORIMETRIC = 1 +INTENT_SATURATION = 2 +INTENT_ABSOLUTE_COLORIMETRIC = 3 + +DIRECTION_INPUT = 0 +DIRECTION_OUTPUT = 1 +DIRECTION_PROOF = 2 + +# +# flags + +FLAGS = { + "MATRIXINPUT": 1, + "MATRIXOUTPUT": 2, + "MATRIXONLY": (1 | 2), + "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot + # Don't create prelinearization tables on precalculated transforms + # (internal use): + "NOPRELINEARIZATION": 16, + "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink) + "NOTCACHE": 64, # Inhibit 1-pixel cache + "NOTPRECALC": 256, + "NULLTRANSFORM": 512, # Don't transform anyway + "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy + "LOWRESPRECALC": 2048, # Use less memory to minimize resources + "WHITEBLACKCOMPENSATION": 8192, + "BLACKPOINTCOMPENSATION": 8192, + "GAMUTCHECK": 4096, # Out of Gamut alarm + "SOFTPROOFING": 16384, # Do softproofing + "PRESERVEBLACK": 32768, # Black preservation + "NODEFAULTRESOURCEDEF": 16777216, # CRD special + "GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints +} + +_MAX_FLAG = 0 +for flag in FLAGS.values(): + if isinstance(flag, int): + _MAX_FLAG = _MAX_FLAG | flag + + +# --------------------------------------------------------------------. +# Experimental PIL-level API +# --------------------------------------------------------------------. + +## +# Profile. + +class ImageCmsProfile(object): + + def __init__(self, profile): + """ + :param profile: Either a string representing a filename, + a file like object containing a profile or a + low-level profile object + + """ + + if isStringType(profile): + self._set(core.profile_open(profile), profile) + elif hasattr(profile, "read"): + self._set(core.profile_frombytes(profile.read())) + elif isinstance(profile, _imagingcms.CmsProfile): + self._set(profile) + else: + raise TypeError("Invalid type for Profile") + + def _set(self, profile, filename=None): + self.profile = profile + self.filename = filename + if profile: + self.product_name = None # profile.product_name + self.product_info = None # profile.product_info + else: + self.product_name = None + self.product_info = None + + def tobytes(self): + """ + Returns the profile in a format suitable for embedding in + saved images. + + :returns: a bytes object containing the ICC profile. + """ + + return core.profile_tobytes(self.profile) + + +class ImageCmsTransform(Image.ImagePointHandler): + + """ + Transform. This can be used with the procedural API, or with the standard + Image.point() method. + + Will return the output profile in the output.info['icc_profile']. 
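+
+    A rough sketch of typical use through the pyCMS-style helpers defined
+    below (the profile file names are hypothetical)::
+
+        from PIL import Image, ImageCms
+
+        im = Image.open("photo.jpg")
+        transform = ImageCms.buildTransform(
+            "sRGB.icc", "printer_cmyk.icc", "RGB", "CMYK")
+        cmyk_im = ImageCms.applyTransform(im, transform)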
+ """ + + def __init__(self, input, output, input_mode, output_mode, + intent=INTENT_PERCEPTUAL, proof=None, + proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0): + if proof is None: + self.transform = core.buildTransform( + input.profile, output.profile, + input_mode, output_mode, + intent, + flags + ) + else: + self.transform = core.buildProofTransform( + input.profile, output.profile, proof.profile, + input_mode, output_mode, + intent, proof_intent, + flags + ) + # Note: inputMode and outputMode are for pyCMS compatibility only + self.input_mode = self.inputMode = input_mode + self.output_mode = self.outputMode = output_mode + + self.output_profile = output + + def point(self, im): + return self.apply(im) + + def apply(self, im, imOut=None): + im.load() + if imOut is None: + imOut = Image.new(self.output_mode, im.size, None) + self.transform.apply(im.im.id, imOut.im.id) + imOut.info['icc_profile'] = self.output_profile.tobytes() + return imOut + + def apply_in_place(self, im): + im.load() + if im.mode != self.output_mode: + raise ValueError("mode mismatch") # wrong output mode + self.transform.apply(im.im.id, im.im.id) + im.info['icc_profile'] = self.output_profile.tobytes() + return im + + +def get_display_profile(handle=None): + """ (experimental) Fetches the profile for the current display device. + :returns: None if the profile is not known. + """ + + if sys.platform == "win32": + from PIL import ImageWin + if isinstance(handle, ImageWin.HDC): + profile = core.get_display_profile_win32(handle, 1) + else: + profile = core.get_display_profile_win32(handle or 0) + else: + try: + get = _imagingcms.get_display_profile + except AttributeError: + return None + else: + profile = get() + return ImageCmsProfile(profile) + + +# --------------------------------------------------------------------. +# pyCMS compatible layer +# --------------------------------------------------------------------. + +class PyCMSError(Exception): + + """ (pyCMS) Exception class. + This is used for all errors in the pyCMS API. """ + pass + + +def profileToProfile( + im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, + outputMode=None, inPlace=0, flags=0): + """ + (pyCMS) Applies an ICC transformation to a given image, mapping from + inputProfile to outputProfile. + + If the input or output profiles specified are not valid filenames, a + PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode, + a PyCMSError will be raised. If an error occurs during application of + the profiles, a PyCMSError will be raised. If outputMode is not a mode + supported by the outputProfile (or by pyCMS), a PyCMSError will be + raised. + + This function applies an ICC transformation to im from inputProfile's + color space to outputProfile's color space using the specified rendering + intent to decide how to handle out-of-gamut colors. + + OutputMode can be used to specify that a color mode conversion is to + be done using these profiles, but the specified profiles must be able + to handle that mode. I.e., if converting im from RGB to CMYK using + profiles, the input profile must handle RGB data, and the output + profile must handle CMYK data. + + :param im: An open PIL image object (i.e. Image.new(...) or + Image.open(...), etc.) 
+ :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this image, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this image, or a profile object + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :param outputMode: A valid PIL mode for the output image (i.e. "RGB", + "CMYK", etc.). Note: if rendering the image "inPlace", outputMode + MUST be the same mode as the input, or omitted completely. If + omitted, the outputMode will be the same as the mode of the input + image (im.mode) + :param inPlace: Boolean (1 = True, None or 0 = False). If True, the + original image is modified in-place, and None is returned. If False + (default), a new Image object is returned with the transform applied. + :param flags: Integer (0-...) specifying additional flags + :returns: Either None or a new PIL image object, depending on value of + inPlace + :exception PyCMSError: + """ + + if outputMode is None: + outputMode = im.mode + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError( + "flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + transform = ImageCmsTransform( + inputProfile, outputProfile, im.mode, outputMode, + renderingIntent, flags=flags + ) + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + return imOut + + +def getOpenProfile(profileFilename): + """ + (pyCMS) Opens an ICC profile file. + + The PyCMSProfile object can be passed back into pyCMS for use in creating + transforms and such (as in ImageCms.buildTransformFromOpenProfiles()). + + If profileFilename is not a valid filename for an ICC profile, a PyCMSError + will be raised. + + :param profileFilename: String, as a valid filename path to the ICC profile + you wish to open, or a file-like object. + :returns: A CmsProfile class object. + :exception PyCMSError: + """ + + try: + return ImageCmsProfile(profileFilename) + except (IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def buildTransform( + inputProfile, outputProfile, inMode, outMode, + renderingIntent=INTENT_PERCEPTUAL, flags=0): + """ + (pyCMS) Builds an ICC transform mapping from the inputProfile to the + outputProfile. Use applyTransform to apply the transform to a given + image. + + If the input or output profiles specified are not valid filenames, a + PyCMSError will be raised. If an error occurs during creation of the + transform, a PyCMSError will be raised. + + If inMode or outMode are not a mode supported by the outputProfile (or + by pyCMS), a PyCMSError will be raised. 
+ + This function builds and returns an ICC transform from the inputProfile + to the outputProfile using the renderingIntent to determine what to do + with out-of-gamut colors. It will ONLY work for converting images that + are in inMode to images that are in outMode color format (PIL mode, + i.e. "RGB", "RGBA", "CMYK", etc.). + + Building the transform is a fair part of the overhead in + ImageCms.profileToProfile(), so if you're planning on converting multiple + images using the same input/output settings, this can save you time. + Once you have a transform object, it can be used with + ImageCms.applyProfile() to convert images without the need to re-compute + the lookup table for the transform. + + The reason pyCMS returns a class object rather than a handle directly + to the transform is that it needs to keep track of the PIL input/output + modes that the transform is meant for. These attributes are stored in + the "inMode" and "outMode" attributes of the object (which can be + manually overridden if you really want to, but I don't know of any + time that would be of use, or would even work). + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. + :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError( + "flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + return ImageCmsTransform( + inputProfile, outputProfile, inMode, outMode, + renderingIntent, flags=flags) + except (IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def buildProofTransform( + inputProfile, outputProfile, proofProfile, inMode, outMode, + renderingIntent=INTENT_PERCEPTUAL, + proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=FLAGS["SOFTPROOFING"]): + """ + (pyCMS) Builds an ICC transform mapping from the inputProfile to the + outputProfile, but tries to simulate the result that would be + obtained on the proofProfile device. + + If the input, output, or proof profiles specified are not valid + filenames, a PyCMSError will be raised. + + If an error occurs during creation of the transform, a PyCMSError will + be raised. 
+ + If inMode or outMode are not a mode supported by the outputProfile + (or by pyCMS), a PyCMSError will be raised. + + This function builds and returns an ICC transform from the inputProfile + to the outputProfile, but tries to simulate the result that would be + obtained on the proofProfile device using renderingIntent and + proofRenderingIntent to determine what to do with out-of-gamut + colors. This is known as "soft-proofing". It will ONLY work for + converting images that are in inMode to images that are in outMode + color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.). + + Usage of the resulting transform object is exactly the same as with + ImageCms.buildTransform(). + + Proof profiling is generally used when using an output device to get a + good idea of what the final printed/displayed image would look like on + the proofProfile device when it's quicker and easier to use the + output device for judging color. Generally, this means that the + output device is a monitor, or a dye-sub printer (etc.), and the simulated + device is something more expensive, complicated, or time consuming + (making it difficult to make a real print for color judgement purposes). + + Soft-proofing basically functions by adjusting the colors on the + output device to match the colors of the device being simulated. However, + when the simulated device has a much wider gamut than the output + device, you may obtain marginal results. + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + (monitor, usually) profile you wish to use for this transform, or a + profile object + :param proofProfile: String, as a valid filename path to the ICC proof + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the input->proof (simulated) transform + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :param proofRenderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for proof->output transform + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. 
+ :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError( + "flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + if not isinstance(proofProfile, ImageCmsProfile): + proofProfile = ImageCmsProfile(proofProfile) + return ImageCmsTransform( + inputProfile, outputProfile, inMode, outMode, renderingIntent, + proofProfile, proofRenderingIntent, flags) + except (IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +buildTransformFromOpenProfiles = buildTransform +buildProofTransformFromOpenProfiles = buildProofTransform + + +def applyTransform(im, transform, inPlace=0): + """ + (pyCMS) Applies a transform to a given image. + + If im.mode != transform.inMode, a PyCMSError is raised. + + If inPlace == TRUE and transform.inMode != transform.outMode, a + PyCMSError is raised. + + If im.mode, transfer.inMode, or transfer.outMode is not supported by + pyCMSdll or the profiles you used for the transform, a PyCMSError is + raised. + + If an error occurs while the transform is being applied, a PyCMSError + is raised. + + This function applies a pre-calculated transform (from + ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) + to an image. The transform can be used for multiple images, saving + considerable calculation time if doing the same conversion multiple times. + + If you want to modify im in-place instead of receiving a new image as + the return value, set inPlace to TRUE. This can only be done if + transform.inMode and transform.outMode are the same, because we can't + change the mode in-place (the buffer sizes for some modes are + different). The default behavior is to return a new Image object of + the same dimensions in mode transform.outMode. + + :param im: A PIL Image object, and im.mode must be the same as the inMode + supported by the transform. + :param transform: A valid CmsTransform class object + :param inPlace: Bool (1 == True, 0 or None == False). If True, im is + modified in place and None is returned, if False, a new Image object + with the transform applied is returned (and im is not changed). The + default is False. + :returns: Either None, or a new PIL Image object, depending on the value of + inPlace. The profile will be returned in the image's + info['icc_profile']. + :exception PyCMSError: + """ + + try: + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (TypeError, ValueError) as v: + raise PyCMSError(v) + + return imOut + + +def createProfile(colorSpace, colorTemp=-1): + """ + (pyCMS) Creates a profile. + + If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised + + If using LAB and colorTemp != a positive integer, a PyCMSError is raised. + + If an error occurs while creating the profile, a PyCMSError is raised. + + Use this function to create common profiles on-the-fly instead of + having to supply a profile on disk and knowing the path to it. It + returns a normal CmsProfile object that can be passed to + ImageCms.buildTransformFromOpenProfiles() to create a transform to apply + to images. 
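# Illustrative sketch of on-the-fly profile creation as described above; no
# profile files on disk are required for the built-in colour spaces.
from PIL import ImageCms

srgb = ImageCms.createProfile("sRGB")
lab = ImageCms.createProfile("LAB", colorTemp=6500)   # white point only affects LAB
rgb_to_lab = ImageCms.buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")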
+ + :param colorSpace: String, the color space of the profile you wish to + create. + Currently only "LAB", "XYZ", and "sRGB" are supported. + :param colorTemp: Positive integer for the white point for the profile, in + degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50 + illuminant if omitted (5000k). colorTemp is ONLY applied to LAB + profiles, and is ignored for XYZ and sRGB. + :returns: A CmsProfile class object + :exception PyCMSError: + """ + + if colorSpace not in ["LAB", "XYZ", "sRGB"]: + raise PyCMSError( + "Color space not supported for on-the-fly profile creation (%s)" + % colorSpace) + + if colorSpace == "LAB": + try: + colorTemp = float(colorTemp) + except: + raise PyCMSError( + "Color temperature must be numeric, \"%s\" not valid" + % colorTemp) + + try: + return core.createProfile(colorSpace, colorTemp) + except (TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileName(profile): + """ + + (pyCMS) Gets the internal product name for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised If an error occurs while trying to obtain the + name tag, a PyCMSError is raised. + + Use this function to obtain the INTERNAL name of the profile (stored + in an ICC tag in the profile itself), usually the one used when the + profile was originally created. Sometimes this tag also contains + additional information supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal name of the profile as stored + in an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # do it in python, not c. + # // name was "%s - %s" (model, manufacturer) || Description , + # // but if the Model and Manufacturer were the same or the model + # // was long, Just the model, in 1.x + model = profile.profile.product_model + manufacturer = profile.profile.product_manufacturer + + if not (model or manufacturer): + return profile.profile.product_description + "\n" + if not manufacturer or len(model) > 30: + return model + "\n" + return "%s - %s\n" % (model, manufacturer) + + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileInfo(profile): + """ + (pyCMS) Gets the internal product information for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the info tag, a PyCMSError + is raised + + Use this function to obtain the information stored in the profile's + info tag. This often contains details about the profile, and how it + was created, as supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # add an extra newline to preserve pyCMS compatibility + # Python, not C. the white point bits weren't working well, + # so skipping. 
+ # // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint + description = profile.profile.product_description + cpright = profile.profile.product_copyright + arr = [] + for elt in (description, cpright): + if elt: + arr.append(elt) + return "\r\n\r\n".join(arr) + "\r\n\r\n" + + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileCopyright(profile): + """ + (pyCMS) Gets the copyright for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the copyright tag, a PyCMSError + is raised + + Use this function to obtain the information stored in the profile's + copyright tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.product_copyright + "\n" + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileManufacturer(profile): + """ + (pyCMS) Gets the manufacturer for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the manufacturer tag, a + PyCMSError is raised + + Use this function to obtain the information stored in the profile's + manufacturer tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.product_manufacturer + "\n" + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileModel(profile): + """ + (pyCMS) Gets the model for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the model tag, a PyCMSError + is raised + + Use this function to obtain the information stored in the profile's + model tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.product_model + "\n" + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getProfileDescription(profile): + """ + (pyCMS) Gets the description for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the description tag, a PyCMSError + is raised + + Use this function to obtain the information stored in the profile's + description tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. 
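# Illustrative sketch of the tag-reading helpers in this block; the profile
# filename is a hypothetical placeholder. Each helper also accepts an
# already-opened CmsProfile object.
from PIL import ImageCms

path = "AdobeRGB1998.icc"                     # hypothetical profile path
print(ImageCms.getProfileName(path))          # internal product name
print(ImageCms.getProfileDescription(path))   # description tag
print(ImageCms.getProfileCopyright(path))     # copyright tag
print(ImageCms.getProfileInfo(path))          # description and copyright together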
+ :returns: A string containing the internal profile information stored in an + ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.product_description + "\n" + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def getDefaultIntent(profile): + """ + (pyCMS) Gets the default intent name for the given profile. + + If profile isn't a valid CmsProfile object or filename to a profile, + a PyCMSError is raised. + + If an error occurs while trying to obtain the default intent, a + PyCMSError is raised. + + Use this function to determine the default (and usually best optimized) + rendering intent for this profile. Most profiles support multiple + rendering intents, but are intended mostly for one type of conversion. + If you wish to use a different intent than returned, use + ImageCms.isIntentSupported() to verify it will work first. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: Integer 0-3 specifying the default rendering intent for this + profile. + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.rendering_intent + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def isIntentSupported(profile, intent, direction): + """ + (pyCMS) Checks if a given intent is supported. + + Use this function to verify that you can use your desired + renderingIntent with profile, and that profile can be used for the + input/output/proof profile as you desire. + + Some profiles are created specifically for one "direction", can cannot + be used for others. Some profiles can only be used for certain + rendering intents... so it's best to either verify this before trying + to create a transform with them (using this function), or catch the + potential PyCMSError that will occur if they don't support the modes + you select. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :param intent: Integer (0-3) specifying the rendering intent you wish to + use with this profile + + INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL) + INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC) + INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION) + INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC) + + see the pyCMS documentation for details on rendering intents and what + they do. + :param direction: Integer specifying if the profile is to be used for input, + output, or proof + + INPUT = 0 (or use ImageCms.DIRECTION_INPUT) + OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT) + PROOF = 2 (or use ImageCms.DIRECTION_PROOF) + + :returns: 1 if the intent/direction are supported, -1 if they are not. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # FIXME: I get different results for the same data w. 
different + # compilers. Bug in LittleCMS or in the binding? + if profile.profile.is_intent_supported(intent, direction): + return 1 + else: + return -1 + except (AttributeError, IOError, TypeError, ValueError) as v: + raise PyCMSError(v) + + +def versions(): + """ + (pyCMS) Fetches versions. + """ + + return ( + VERSION, core.littlecms_version, + sys.version.split()[0], Image.VERSION + ) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb83cc4f77171a8f8cf2007f6943bcc1b8c36ca7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageCms.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.py new file mode 100644 index 0000000000000000000000000000000000000000..08c00fd54215e02b821290c96ec5221a52590cca --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.py @@ -0,0 +1,309 @@ +# +# The Python Imaging Library +# $Id$ +# +# map CSS3-style colour description strings to RGB +# +# History: +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-15 fl Added RGBA support +# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2 +# 2004-07-19 fl Fixed gray/grey spelling issues +# 2009-03-05 fl Fixed rounding error in grayscale calculation +# +# Copyright (c) 2002-2004 by Secret Labs AB +# Copyright (c) 2002-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image +import re + + +def getrgb(color): + """ + Convert a color string to an RGB tuple. If the string cannot be parsed, + this function raises a :py:exc:`ValueError` exception. + + .. 
versionadded:: 1.1.4 + + :param color: A color string + :return: ``(red, green, blue[, alpha])`` + """ + color = color.lower() + + rgb = colormap.get(color, None) + if rgb: + if isinstance(rgb, tuple): + return rgb + colormap[color] = rgb = getrgb(rgb) + return rgb + + # check for known string formats + if re.match('#[a-f0-9]{3}$', color): + return ( + int(color[1]*2, 16), + int(color[2]*2, 16), + int(color[3]*2, 16), + ) + + if re.match('#[a-f0-9]{4}$', color): + return ( + int(color[1]*2, 16), + int(color[2]*2, 16), + int(color[3]*2, 16), + int(color[4]*2, 16), + ) + + if re.match('#[a-f0-9]{6}$', color): + return ( + int(color[1:3], 16), + int(color[3:5], 16), + int(color[5:7], 16), + ) + + if re.match('#[a-f0-9]{8}$', color): + return ( + int(color[1:3], 16), + int(color[3:5], 16), + int(color[5:7], 16), + int(color[7:9], 16), + ) + + m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return ( + int(m.group(1)), + int(m.group(2)), + int(m.group(3)) + ) + + m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color) + if m: + return ( + int((int(m.group(1)) * 255) / 100.0 + 0.5), + int((int(m.group(2)) * 255) / 100.0 + 0.5), + int((int(m.group(3)) * 255) / 100.0 + 0.5) + ) + + m = re.match(r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color) + if m: + from colorsys import hls_to_rgb + rgb = hls_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(3)) / 100.0, + float(m.group(2)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5) + ) + + m = re.match(r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color) + if m: + from colorsys import hsv_to_rgb + rgb = hsv_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(2)) / 100.0, + float(m.group(3)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5) + ) + + m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", + color) + if m: + return ( + int(m.group(1)), + int(m.group(2)), + int(m.group(3)), + int(m.group(4)) + ) + raise ValueError("unknown color specifier: %r" % color) + + +def getcolor(color, mode): + """ + Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a + greyscale value if the mode is not color or a palette image. If the string + cannot be parsed, this function raises a :py:exc:`ValueError` exception. + + .. versionadded:: 1.1.4 + + :param color: A color string + :return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])`` + """ + # same as getrgb, but converts the result to the given mode + color, alpha = getrgb(color), 255 + if len(color) == 4: + color, alpha = color[0:3], color[3] + + if Image.getmodebase(mode) == "L": + r, g, b = color + color = (r*299 + g*587 + b*114)//1000 + if mode[-1] == 'A': + return (color, alpha) + else: + if mode[-1] == 'A': + return color + (alpha,) + return color + + +colormap = { + # X11 colour table from https://drafts.csswg.org/css-color-4/, with + # gray/grey spelling issues fixed. This is a superset of HTML 4.0 + # colour names used in CSS 1. 
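# Illustrative calls to getrgb()/getcolor() as defined above, covering the
# main string formats the parser accepts.
from PIL import ImageColor

ImageColor.getrgb("#ff8000")               # (255, 128, 0)
ImageColor.getrgb("rgb(255, 128, 0)")      # (255, 128, 0)
ImageColor.getrgb("hsl(120, 100%, 50%)")   # (0, 255, 0)
ImageColor.getrgb("salmon")                # looked up in the colormap below
ImageColor.getcolor("salmon", "L")         # same colour reduced to one grey level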
+ "aliceblue": "#f0f8ff", + "antiquewhite": "#faebd7", + "aqua": "#00ffff", + "aquamarine": "#7fffd4", + "azure": "#f0ffff", + "beige": "#f5f5dc", + "bisque": "#ffe4c4", + "black": "#000000", + "blanchedalmond": "#ffebcd", + "blue": "#0000ff", + "blueviolet": "#8a2be2", + "brown": "#a52a2a", + "burlywood": "#deb887", + "cadetblue": "#5f9ea0", + "chartreuse": "#7fff00", + "chocolate": "#d2691e", + "coral": "#ff7f50", + "cornflowerblue": "#6495ed", + "cornsilk": "#fff8dc", + "crimson": "#dc143c", + "cyan": "#00ffff", + "darkblue": "#00008b", + "darkcyan": "#008b8b", + "darkgoldenrod": "#b8860b", + "darkgray": "#a9a9a9", + "darkgrey": "#a9a9a9", + "darkgreen": "#006400", + "darkkhaki": "#bdb76b", + "darkmagenta": "#8b008b", + "darkolivegreen": "#556b2f", + "darkorange": "#ff8c00", + "darkorchid": "#9932cc", + "darkred": "#8b0000", + "darksalmon": "#e9967a", + "darkseagreen": "#8fbc8f", + "darkslateblue": "#483d8b", + "darkslategray": "#2f4f4f", + "darkslategrey": "#2f4f4f", + "darkturquoise": "#00ced1", + "darkviolet": "#9400d3", + "deeppink": "#ff1493", + "deepskyblue": "#00bfff", + "dimgray": "#696969", + "dimgrey": "#696969", + "dodgerblue": "#1e90ff", + "firebrick": "#b22222", + "floralwhite": "#fffaf0", + "forestgreen": "#228b22", + "fuchsia": "#ff00ff", + "gainsboro": "#dcdcdc", + "ghostwhite": "#f8f8ff", + "gold": "#ffd700", + "goldenrod": "#daa520", + "gray": "#808080", + "grey": "#808080", + "green": "#008000", + "greenyellow": "#adff2f", + "honeydew": "#f0fff0", + "hotpink": "#ff69b4", + "indianred": "#cd5c5c", + "indigo": "#4b0082", + "ivory": "#fffff0", + "khaki": "#f0e68c", + "lavender": "#e6e6fa", + "lavenderblush": "#fff0f5", + "lawngreen": "#7cfc00", + "lemonchiffon": "#fffacd", + "lightblue": "#add8e6", + "lightcoral": "#f08080", + "lightcyan": "#e0ffff", + "lightgoldenrodyellow": "#fafad2", + "lightgreen": "#90ee90", + "lightgray": "#d3d3d3", + "lightgrey": "#d3d3d3", + "lightpink": "#ffb6c1", + "lightsalmon": "#ffa07a", + "lightseagreen": "#20b2aa", + "lightskyblue": "#87cefa", + "lightslategray": "#778899", + "lightslategrey": "#778899", + "lightsteelblue": "#b0c4de", + "lightyellow": "#ffffe0", + "lime": "#00ff00", + "limegreen": "#32cd32", + "linen": "#faf0e6", + "magenta": "#ff00ff", + "maroon": "#800000", + "mediumaquamarine": "#66cdaa", + "mediumblue": "#0000cd", + "mediumorchid": "#ba55d3", + "mediumpurple": "#9370db", + "mediumseagreen": "#3cb371", + "mediumslateblue": "#7b68ee", + "mediumspringgreen": "#00fa9a", + "mediumturquoise": "#48d1cc", + "mediumvioletred": "#c71585", + "midnightblue": "#191970", + "mintcream": "#f5fffa", + "mistyrose": "#ffe4e1", + "moccasin": "#ffe4b5", + "navajowhite": "#ffdead", + "navy": "#000080", + "oldlace": "#fdf5e6", + "olive": "#808000", + "olivedrab": "#6b8e23", + "orange": "#ffa500", + "orangered": "#ff4500", + "orchid": "#da70d6", + "palegoldenrod": "#eee8aa", + "palegreen": "#98fb98", + "paleturquoise": "#afeeee", + "palevioletred": "#db7093", + "papayawhip": "#ffefd5", + "peachpuff": "#ffdab9", + "peru": "#cd853f", + "pink": "#ffc0cb", + "plum": "#dda0dd", + "powderblue": "#b0e0e6", + "purple": "#800080", + "rebeccapurple": "#663399", + "red": "#ff0000", + "rosybrown": "#bc8f8f", + "royalblue": "#4169e1", + "saddlebrown": "#8b4513", + "salmon": "#fa8072", + "sandybrown": "#f4a460", + "seagreen": "#2e8b57", + "seashell": "#fff5ee", + "sienna": "#a0522d", + "silver": "#c0c0c0", + "skyblue": "#87ceeb", + "slateblue": "#6a5acd", + "slategray": "#708090", + "slategrey": "#708090", + "snow": "#fffafa", + "springgreen": "#00ff7f", + 
"steelblue": "#4682b4", + "tan": "#d2b48c", + "teal": "#008080", + "thistle": "#d8bfd8", + "tomato": "#ff6347", + "turquoise": "#40e0d0", + "violet": "#ee82ee", + "wheat": "#f5deb3", + "white": "#ffffff", + "whitesmoke": "#f5f5f5", + "yellow": "#ffff00", + "yellowgreen": "#9acd32", +} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abc9dcdcfeb426c82cb41c26597ba7d5b452dcf9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageColor.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc8902523c5add1d9f38a238ee837b7748b7ea0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.py @@ -0,0 +1,387 @@ +# +# The Python Imaging Library +# $Id$ +# +# drawing interface operations +# +# History: +# 1996-04-13 fl Created (experimental) +# 1996-08-07 fl Filled polygons, ellipses. +# 1996-08-13 fl Added text support +# 1998-06-28 fl Handle I and F images +# 1998-12-29 fl Added arc; use arc primitive to draw ellipses +# 1999-01-10 fl Added shape stuff (experimental) +# 1999-02-06 fl Added bitmap support +# 1999-02-11 fl Changed all primitives to take options +# 1999-02-20 fl Fixed backwards compatibility +# 2000-10-12 fl Copy on write, when necessary +# 2001-02-18 fl Use default ink for bitmap/text also in fill mode +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing +# 2002-12-11 fl Refactored low-level drawing API (work in progress) +# 2004-08-26 fl Made Draw() a factory function, added getdraw() support +# 2004-09-04 fl Added width support to line primitive +# 2004-09-10 fl Added font mode handling +# 2006-06-19 fl Added font bearing support (getmask2) +# +# Copyright (c) 1997-2006 by Secret Labs AB +# Copyright (c) 1996-2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import numbers + +from . import Image, ImageColor +from ._util import isStringType + +""" +A simple 2D drawing interface for PIL images. +

+Application code should use the Draw factory, instead of +directly. +""" + + +class ImageDraw(object): + + def __init__(self, im, mode=None): + """ + Create a drawing instance. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + im.load() + if im.readonly: + im._copy() # make it writeable + blend = 0 + if mode is None: + mode = im.mode + if mode != im.mode: + if mode == "RGBA" and im.mode == "RGB": + blend = 1 + else: + raise ValueError("mode mismatch") + if mode == "P": + self.palette = im.palette + else: + self.palette = None + self.im = im.im + self.draw = Image.core.draw(self.im, blend) + self.mode = mode + if mode in ("I", "F"): + self.ink = self.draw.draw_ink(1, mode) + else: + self.ink = self.draw.draw_ink(-1, mode) + if mode in ("1", "P", "I", "F"): + # FIXME: fix Fill2 to properly support matte for I+F images + self.fontmode = "1" + else: + self.fontmode = "L" # aliasing is okay for other modes + self.fill = 0 + self.font = None + + def getfont(self): + """ + Get the current default font. + + :returns: An image font.""" + if not self.font: + # FIXME: should add a font repository + from . import ImageFont + self.font = ImageFont.load_default() + return self.font + + def _getink(self, ink, fill=None): + if ink is None and fill is None: + if self.fill: + fill = self.ink + else: + ink = self.ink + else: + if ink is not None: + if isStringType(ink): + ink = ImageColor.getcolor(ink, self.mode) + if self.palette and not isinstance(ink, numbers.Number): + ink = self.palette.getcolor(ink) + ink = self.draw.draw_ink(ink, self.mode) + if fill is not None: + if isStringType(fill): + fill = ImageColor.getcolor(fill, self.mode) + if self.palette and not isinstance(fill, numbers.Number): + fill = self.palette.getcolor(fill) + fill = self.draw.draw_ink(fill, self.mode) + return ink, fill + + def arc(self, xy, start, end, fill=None): + """Draw an arc.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_arc(xy, start, end, ink) + + def bitmap(self, xy, bitmap, fill=None): + """Draw a bitmap.""" + bitmap.load() + ink, fill = self._getink(fill) + if ink is None: + ink = fill + if ink is not None: + self.draw.draw_bitmap(xy, bitmap.im, ink) + + def chord(self, xy, start, end, fill=None, outline=None): + """Draw a chord.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_chord(xy, start, end, fill, 1) + if ink is not None: + self.draw.draw_chord(xy, start, end, ink, 0) + + def ellipse(self, xy, fill=None, outline=None): + """Draw an ellipse.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_ellipse(xy, fill, 1) + if ink is not None: + self.draw.draw_ellipse(xy, ink, 0) + + def line(self, xy, fill=None, width=0): + """Draw a line, or a connected sequence of line segments.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_lines(xy, ink, width) + + def shape(self, shape, fill=None, outline=None): + """(Experimental) Draw a shape.""" + shape.close() + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_outline(shape, fill, 1) + if ink is not None: + self.draw.draw_outline(shape, ink, 0) + + def pieslice(self, xy, start, end, fill=None, outline=None): + """Draw a pieslice.""" + ink, fill = 
self._getink(outline, fill) + if fill is not None: + self.draw.draw_pieslice(xy, start, end, fill, 1) + if ink is not None: + self.draw.draw_pieslice(xy, start, end, ink, 0) + + def point(self, xy, fill=None): + """Draw one or more individual pixels.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_points(xy, ink) + + def polygon(self, xy, fill=None, outline=None): + """Draw a polygon.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_polygon(xy, fill, 1) + if ink is not None: + self.draw.draw_polygon(xy, ink, 0) + + def rectangle(self, xy, fill=None, outline=None): + """Draw a rectangle.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_rectangle(xy, fill, 1) + if ink is not None: + self.draw.draw_rectangle(xy, ink, 0) + + def _multiline_check(self, text): + """Draw text.""" + split_character = "\n" if isinstance(text, str) else b"\n" + + return split_character in text + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + + return text.split(split_character) + + def text(self, xy, text, fill=None, font=None, anchor=None, + *args, **kwargs): + if self._multiline_check(text): + return self.multiline_text(xy, text, fill, font, anchor, + *args, **kwargs) + ink, fill = self._getink(fill) + if font is None: + font = self.getfont() + if ink is None: + ink = fill + if ink is not None: + try: + mask, offset = font.getmask2(text, self.fontmode, *args, **kwargs) + xy = xy[0] + offset[0], xy[1] + offset[1] + except AttributeError: + try: + mask = font.getmask(text, self.fontmode, *args, **kwargs) + except TypeError: + mask = font.getmask(text) + self.draw.draw_bitmap(xy, mask, ink) + + def multiline_text(self, xy, text, fill=None, font=None, anchor=None, + spacing=4, align="left", direction=None, features=None): + widths = [] + max_width = 0 + lines = self._multiline_split(text) + line_spacing = self.textsize('A', font=font)[1] + spacing + for line in lines: + line_width, line_height = self.textsize(line, font) + widths.append(line_width) + max_width = max(max_width, line_width) + left, top = xy + for idx, line in enumerate(lines): + if align == "left": + pass # left = x + elif align == "center": + left += (max_width - widths[idx]) / 2.0 + elif align == "right": + left += (max_width - widths[idx]) + else: + assert False, 'align must be "left", "center" or "right"' + self.text((left, top), line, fill, font, anchor, + direction=direction, features=features) + top += line_spacing + left = xy[0] + + def textsize(self, text, font=None, spacing=4, direction=None, + features=None): + """Get the size of a given string, in pixels.""" + if self._multiline_check(text): + return self.multiline_textsize(text, font, spacing, + direction, features) + + if font is None: + font = self.getfont() + return font.getsize(text, direction, features) + + def multiline_textsize(self, text, font=None, spacing=4, direction=None, + features=None): + max_width = 0 + lines = self._multiline_split(text) + line_spacing = self.textsize('A', font=font)[1] + spacing + for line in lines: + line_width, line_height = self.textsize(line, font, spacing, + direction, features) + max_width = max(max_width, line_width) + return max_width, len(lines)*line_spacing - spacing + + +def Draw(im, mode=None): + """ + A simple 2D drawing interface for PIL images. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. 
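# Illustrative drawing sketch using the Draw factory documented above; the
# output filename is a hypothetical placeholder.
from PIL import Image, ImageDraw

im = Image.new("RGB", (200, 100), "white")
d = ImageDraw.Draw(im)
d.line([(10, 10), (190, 10)], fill="black", width=3)
d.rectangle([10, 40, 60, 80], fill="#4682b4", outline="navy")
d.ellipse([80, 30, 180, 90], outline="red")
d.text((90, 50), "hello", fill="black")    # falls back to the default bitmap font
im.save("drawing.png")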
For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + try: + return im.getdraw(mode) + except AttributeError: + return ImageDraw(im, mode) + + +# experimental access to the outline API +try: + Outline = Image.core.outline +except AttributeError: + Outline = None + + +def getdraw(im=None, hints=None): + """ + (Experimental) A more advanced 2D drawing interface for PIL images, + based on the WCK interface. + + :param im: The image to draw in. + :param hints: An optional list of hints. + :returns: A (drawing context, drawing resource factory) tuple. + """ + # FIXME: this needs more work! + # FIXME: come up with a better 'hints' scheme. + handler = None + if not hints or "nicest" in hints: + try: + from . import _imagingagg as handler + except ImportError: + pass + if handler is None: + from . import ImageDraw2 as handler + if im: + im = handler.Draw(im) + return im, handler + + +def floodfill(image, xy, value, border=None, thresh=0): + """ + (experimental) Fills a bounded region with a given color. + + :param image: Target image. + :param xy: Seed position (a 2-item coordinate tuple). See + :ref:`coordinate-system`. + :param value: Fill color. + :param border: Optional border value. If given, the region consists of + pixels with a color different from the border color. If not given, + the region consists of pixels having the same color as the seed + pixel. + :param thresh: Optional threshold value which specifies a maximum + tolerable difference of a pixel value from the 'background' in + order for it to be replaced. Useful for filling regions of non- + homogeneous, but similar, colors. + """ + # based on an implementation by Eric S. Raymond + pixel = image.load() + x, y = xy + try: + background = pixel[x, y] + if _color_diff(value, background) <= thresh: + return # seed point already has fill color + pixel[x, y] = value + except (ValueError, IndexError): + return # seed point outside image + edge = [(x, y)] + if border is None: + while edge: + newedge = [] + for (x, y) in edge: + for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)): + try: + p = pixel[s, t] + except IndexError: + pass + else: + if _color_diff(p, background) <= thresh: + pixel[s, t] = value + newedge.append((s, t)) + edge = newedge + else: + while edge: + newedge = [] + for (x, y) in edge: + for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)): + try: + p = pixel[s, t] + except IndexError: + pass + else: + if p != value and p != border: + pixel[s, t] = value + newedge.append((s, t)) + edge = newedge + + +def _color_diff(rgb1, rgb2): + """ + Uses 1-norm distance to calculate difference between two rgb values. 
+ """ + return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae29fb87cd29aa321d05bf4063fb2d41cb36c079 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.py new file mode 100644 index 0000000000000000000000000000000000000000..f7902b031f666f620e01aa04eb6d5acaaada2924 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.py @@ -0,0 +1,108 @@ +# +# The Python Imaging Library +# $Id$ +# +# WCK-style drawing interface operations +# +# History: +# 2003-12-07 fl created +# 2005-05-15 fl updated; added to PIL as ImageDraw2 +# 2005-05-15 fl added text support +# 2005-05-20 fl added arc/chord/pieslice support +# +# Copyright (c) 2003-2005 by Secret Labs AB +# Copyright (c) 2003-2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath + + +class Pen(object): + def __init__(self, color, width=1, opacity=255): + self.color = ImageColor.getrgb(color) + self.width = width + + +class Brush(object): + def __init__(self, color, opacity=255): + self.color = ImageColor.getrgb(color) + + +class Font(object): + def __init__(self, color, file, size=12): + # FIXME: add support for bitmap fonts + self.color = ImageColor.getrgb(color) + self.font = ImageFont.truetype(file, size) + + +class Draw(object): + + def __init__(self, image, size=None, color=None): + if not hasattr(image, "im"): + image = Image.new(image, size, color) + self.draw = ImageDraw.Draw(image) + self.image = image + self.transform = None + + def flush(self): + return self.image + + def render(self, op, xy, pen, brush=None): + # handle color arguments + outline = fill = None + width = 1 + if isinstance(pen, Pen): + outline = pen.color + width = pen.width + elif isinstance(brush, Pen): + outline = brush.color + width = brush.width + if isinstance(brush, Brush): + fill = brush.color + elif isinstance(pen, Brush): + fill = pen.color + # handle transformation + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + # render the item + if op == "line": + self.draw.line(xy, fill=outline, width=width) + else: + getattr(self.draw, op)(xy, fill=fill, outline=outline) + + def settransform(self, offset): + (xoffset, yoffset) = offset + self.transform = (1, 0, xoffset, 0, 1, yoffset) + + def arc(self, xy, start, end, *options): + self.render("arc", xy, start, end, *options) + + def chord(self, xy, start, end, *options): + self.render("chord", xy, start, end, *options) + + def ellipse(self, xy, *options): + self.render("ellipse", xy, *options) + + def line(self, xy, *options): + self.render("line", xy, *options) + + def pieslice(self, xy, start, end, *options): + self.render("pieslice", xy, start, end, *options) + + def polygon(self, xy, *options): + self.render("polygon", xy, *options) + + def rectangle(self, xy, *options): + self.render("rectangle", xy, *options) + + def text(self, xy, text, font): + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + self.draw.text(xy, text, font=font.font, fill=font.color) + + def 
textsize(self, text, font): + return self.draw.textsize(text, font=font.font) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b617a77d0d0671e6f68a5bf66399241859b21c43 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageDraw2.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.py new file mode 100644 index 0000000000000000000000000000000000000000..11c9c3a06ac0903973b2c6f090ca1de3ab8167bc --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.py @@ -0,0 +1,100 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image enhancement classes +# +# For a background, see "Image Processing By Interpolation and +# Extrapolation", Paul Haeberli and Douglas Voorhies. Available +# at http://www.graficaobscura.com/interp/index.html +# +# History: +# 1996-03-23 fl Created +# 2009-06-16 fl Fixed mean calculation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFilter, ImageStat + + +class _Enhance(object): + + def enhance(self, factor): + """ + Returns an enhanced image. + + :param factor: A floating point value controlling the enhancement. + Factor 1.0 always returns a copy of the original image, + lower factors mean less color (brightness, contrast, + etc), and higher values more. There are no restrictions + on this value. + :rtype: :py:class:`~PIL.Image.Image` + """ + return Image.blend(self.degenerate, self.image, factor) + + +class Color(_Enhance): + """Adjust image color balance. + + This class can be used to adjust the colour balance of an image, in + a manner similar to the controls on a colour TV set. An enhancement + factor of 0.0 gives a black and white image. A factor of 1.0 gives + the original image. + """ + def __init__(self, image): + self.image = image + self.intermediate_mode = 'L' + if 'A' in image.getbands(): + self.intermediate_mode = 'LA' + + self.degenerate = image.convert(self.intermediate_mode).convert(image.mode) + + +class Contrast(_Enhance): + """Adjust image contrast. + + This class can be used to control the contrast of an image, similar + to the contrast control on a TV set. An enhancement factor of 0.0 + gives a solid grey image. A factor of 1.0 gives the original image. + """ + def __init__(self, image): + self.image = image + mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5) + self.degenerate = Image.new("L", image.size, mean).convert(image.mode) + + if 'A' in image.getbands(): + self.degenerate.putalpha(image.getchannel('A')) + + +class Brightness(_Enhance): + """Adjust image brightness. + + This class can be used to control the brightness of an image. An + enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the + original image. + """ + def __init__(self, image): + self.image = image + self.degenerate = Image.new(image.mode, image.size, 0) + + if 'A' in image.getbands(): + self.degenerate.putalpha(image.getchannel('A')) + + +class Sharpness(_Enhance): + """Adjust image sharpness. + + This class can be used to adjust the sharpness of an image. 
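# Illustrative sketch of the enhancement classes above (the filename is a
# hypothetical placeholder); a factor of 1.0 always reproduces the original.
from PIL import Image, ImageEnhance

im = Image.open("photo.jpg")
muted = ImageEnhance.Color(im).enhance(0.5)         # half the colour saturation
punchier = ImageEnhance.Contrast(im).enhance(1.3)   # 30% more contrast
darker = ImageEnhance.Brightness(im).enhance(0.8)   # 20% darker
crisper = ImageEnhance.Sharpness(im).enhance(2.0)   # sharpened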
An + enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the + original image, and a factor of 2.0 gives a sharpened image. + """ + def __init__(self, image): + self.image = image + self.degenerate = image.filter(ImageFilter.SMOOTH) + + if 'A' in image.getbands(): + self.degenerate.putalpha(image.getchannel('A')) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdbcc7bb016b83382b4eb48948ac2cb5079481f9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageEnhance.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..681dee5240a3b42d3ccf2d4e6634ab9a379ed4e3 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.py @@ -0,0 +1,667 @@ +# +# The Python Imaging Library. +# $Id$ +# +# base class for image file handlers +# +# history: +# 1995-09-09 fl Created +# 1996-03-11 fl Fixed load mechanism. +# 1996-04-15 fl Added pcx/xbm decoders. +# 1996-04-30 fl Added encoders. +# 1996-12-14 fl Added load helpers +# 1997-01-11 fl Use encode_to_file where possible +# 1997-08-27 fl Flush output in _save +# 1998-03-05 fl Use memory mapping for some modes +# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B" +# 1999-05-31 fl Added image parser +# 2000-10-12 fl Set readonly flag on memory-mapped images +# 2002-03-20 fl Use better messages for common decoder errors +# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available +# 2003-10-30 fl Added StubImageFile class +# 2004-02-25 fl Made incremental parser more robust +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._util import isPath +import io +import os +import sys +import struct + +MAXBLOCK = 65536 + +SAFEBLOCK = 1024*1024 + +LOAD_TRUNCATED_IMAGES = False + +ERRORS = { + -1: "image buffer overrun error", + -2: "decoding error", + -3: "unknown error", + -8: "bad configuration", + -9: "out of memory error" +} + + +def raise_ioerror(error): + try: + message = Image.core.getcodecstatus(error) + except AttributeError: + message = ERRORS.get(error) + if not message: + message = "decoder error %d" % error + raise IOError(message + " when reading image file") + + +# +# -------------------------------------------------------------------- +# Helpers + +def _tilesort(t): + # sort on offset + return t[2] + + +# +# -------------------------------------------------------------------- +# ImageFile base class + +class ImageFile(Image.Image): + "Base class for image file format handlers." 
+ + def __init__(self, fp=None, filename=None): + Image.Image.__init__(self) + + self._min_frame = 0 + + self.tile = None + self.readonly = 1 # until we know better + + self.decoderconfig = () + self.decodermaxblock = MAXBLOCK + + if isPath(fp): + # filename + self.fp = open(fp, "rb") + self.filename = fp + self._exclusive_fp = True + else: + # stream + self.fp = fp + self.filename = filename + # can be overridden + self._exclusive_fp = None + + try: + self._open() + except (IndexError, # end of data + TypeError, # end of data (ord) + KeyError, # unsupported mode + EOFError, # got header but not the first frame + struct.error) as v: + # close the file only if we have opened it this constructor + if self._exclusive_fp: + self.fp.close() + raise SyntaxError(v) + + if not self.mode or self.size[0] <= 0: + raise SyntaxError("not identified by this driver") + + def draft(self, mode, size): + "Set draft mode" + + pass + + def get_format_mimetype(self): + if self.format is None: + return + return Image.MIME.get(self.format.upper()) + + def verify(self): + "Check file integrity" + + # raise exception if something's wrong. must be called + # directly after open, and closes file when finished. + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def load(self): + "Load image data based on tile list" + + pixel = Image.Image.load(self) + + if self.tile is None: + raise IOError("cannot load this image") + if not self.tile: + return pixel + + self.map = None + use_mmap = self.filename and len(self.tile) == 1 + # As of pypy 2.1.0, memory mapping was failing here. + use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info') + + readonly = 0 + + # look for read/seek overrides + try: + read = self.load_read + # don't use mmap if there are custom read/seek functions + use_mmap = False + except AttributeError: + read = self.fp.read + + try: + seek = self.load_seek + use_mmap = False + except AttributeError: + seek = self.fp.seek + + if use_mmap: + # try memory mapping + decoder_name, extents, offset, args = self.tile[0] + if decoder_name == "raw" and len(args) >= 3 and args[0] == self.mode \ + and args[0] in Image._MAPMODES: + try: + if hasattr(Image.core, "map"): + # use built-in mapper WIN32 only + self.map = Image.core.map(self.filename) + self.map.seek(offset) + self.im = self.map.readimage( + self.mode, self.size, args[1], args[2] + ) + else: + # use mmap, if possible + import mmap + with open(self.filename, "r") as fp: + self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) + self.im = Image.core.map_buffer( + self.map, self.size, decoder_name, extents, offset, args + ) + readonly = 1 + # After trashing self.im, we might need to reload the palette data. + if self.palette: + self.palette.dirty = 1 + except (AttributeError, EnvironmentError, ImportError): + self.map = None + + self.load_prepare() + err_code = -3 # initialize to unknown error + if not self.map: + # sort tiles in file order + self.tile.sort(key=_tilesort) + + try: + # FIXME: This is a hack to handle TIFF's JpegTables tag. 
+ prefix = self.tile_prefix + except AttributeError: + prefix = b"" + + for decoder_name, extents, offset, args in self.tile: + decoder = Image._getdecoder(self.mode, decoder_name, + args, self.decoderconfig) + try: + seek(offset) + decoder.setimage(self.im, extents) + if decoder.pulls_fd: + decoder.setfd(self.fp) + status, err_code = decoder.decode(b"") + else: + b = prefix + while True: + try: + s = read(self.decodermaxblock) + except (IndexError, struct.error): # truncated png/gif + if LOAD_TRUNCATED_IMAGES: + break + else: + raise IOError("image file is truncated") + + if not s: # truncated jpeg + if LOAD_TRUNCATED_IMAGES: + break + else: + self.tile = [] + raise IOError("image file is truncated " + "(%d bytes not processed)" % len(b)) + + b = b + s + n, err_code = decoder.decode(b) + if n < 0: + break + b = b[n:] + finally: + # Need to cleanup here to prevent leaks + decoder.cleanup() + + self.tile = [] + self.readonly = readonly + + self.load_end() + + if self._exclusive_fp and self._close_exclusive_fp_after_loading: + self.fp.close() + self.fp = None + + if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0: + # still raised if decoder fails to return anything + raise_ioerror(err_code) + + return Image.Image.load(self) + + def load_prepare(self): + # create image memory if necessary + if not self.im or\ + self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.new(self.mode, self.size) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def load_end(self): + # may be overridden + pass + + # may be defined for contained formats + # def load_seek(self, pos): + # pass + + # may be defined for blocked formats (e.g. PNG) + # def load_read(self, bytes): + # pass + + def _seek_check(self, frame): + if (frame < self._min_frame or + # Only check upper limit on frames if additional seek operations + # are not required to do so + (not (hasattr(self, "_n_frames") and self._n_frames is None) and + frame >= self.n_frames+self._min_frame)): + raise EOFError("attempt to seek outside sequence") + + return self.tell() != frame + + +class StubImageFile(ImageFile): + """ + Base class for stub image loaders. + + A stub loader is an image loader that can identify files of a + certain format, but relies on external code to load the file. + """ + + def _open(self): + raise NotImplementedError( + "StubImageFile subclass must implement _open" + ) + + def load(self): + loader = self._load() + if loader is None: + raise IOError("cannot find loader for this %s file" % self.format) + image = loader.load(self) + assert image is not None + # become the other object (!) + self.__class__ = image.__class__ + self.__dict__ = image.__dict__ + + def _load(self): + "(Hook) Find actual image loader." + raise NotImplementedError( + "StubImageFile subclass must implement _load" + ) + + +class Parser(object): + """ + Incremental image parser. This class implements the standard + feed/close consumer interface. + """ + incremental = None + image = None + data = None + decoder = None + offset = 0 + finished = 0 + + def reset(self): + """ + (Consumer) Reset the parser. Note that you can only call this + method immediately after you've created a parser; parser + instances cannot be reused. + """ + assert self.data is None, "cannot reuse parsers" + + def feed(self, data): + """ + (Consumer) Feed data to the parser. + + :param data: A string buffer. + :exception IOError: If the parser failed to parse the image file. 
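# Illustrative sketch of the feed/close consumer interface described above,
# decoding a hypothetical file in fixed-size chunks rather than all at once.
from PIL import ImageFile

parser = ImageFile.Parser()
with open("large_photo.jpg", "rb") as f:    # hypothetical file
    while True:
        chunk = f.read(8192)
        if not chunk:
            break
        parser.feed(chunk)
im = parser.close()                          # returns the decoded Image object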
+ """ + # collect data + + if self.finished: + return + + if self.data is None: + self.data = data + else: + self.data = self.data + data + + # parse what we have + if self.decoder: + + if self.offset > 0: + # skip header + skip = min(len(self.data), self.offset) + self.data = self.data[skip:] + self.offset = self.offset - skip + if self.offset > 0 or not self.data: + return + + n, e = self.decoder.decode(self.data) + + if n < 0: + # end of stream + self.data = None + self.finished = 1 + if e < 0: + # decoding error + self.image = None + raise_ioerror(e) + else: + # end of image + return + self.data = self.data[n:] + + elif self.image: + + # if we end up here with no decoder, this file cannot + # be incrementally parsed. wait until we've gotten all + # available data + pass + + else: + + # attempt to open this file + try: + with io.BytesIO(self.data) as fp: + im = Image.open(fp) + except IOError: + # traceback.print_exc() + pass # not enough data + else: + flag = hasattr(im, "load_seek") or hasattr(im, "load_read") + if flag or len(im.tile) != 1: + # custom load code, or multiple tiles + self.decode = None + else: + # initialize decoder + im.load_prepare() + d, e, o, a = im.tile[0] + im.tile = [] + self.decoder = Image._getdecoder( + im.mode, d, a, im.decoderconfig + ) + self.decoder.setimage(im.im, e) + + # calculate decoder offset + self.offset = o + if self.offset <= len(self.data): + self.data = self.data[self.offset:] + self.offset = 0 + + self.image = im + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + """ + (Consumer) Close the stream. + + :returns: An image object. + :exception IOError: If the parser failed to parse the image file either + because it cannot be identified or cannot be + decoded. + """ + # finish decoding + if self.decoder: + # get rid of what's left in the buffers + self.feed(b"") + self.data = self.decoder = None + if not self.finished: + raise IOError("image was incomplete") + if not self.image: + raise IOError("cannot parse this image") + if self.data: + # incremental parsing not possible; reopen the file + # not that we have all data + with io.BytesIO(self.data) as fp: + try: + self.image = Image.open(fp) + finally: + self.image.load() + return self.image + + +# -------------------------------------------------------------------- + +def _save(im, fp, tile, bufsize=0): + """Helper to save image based on tile list + + :param im: Image object. + :param fp: File object. + :param tile: Tile list. + :param bufsize: Optional buffer size + """ + + im.load() + if not hasattr(im, "encoderconfig"): + im.encoderconfig = () + tile.sort(key=_tilesort) + # FIXME: make MAXBLOCK a configuration parameter + # It would be great if we could have the encoder specify what it needs + # But, it would need at least the image size in most cases. RawEncode is + # a tricky case. 
+ bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c + if fp == sys.stdout: + fp.flush() + return + try: + fh = fp.fileno() + fp.flush() + except (AttributeError, io.UnsupportedOperation): + # compress to Python file-compatible object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o, 0) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + while True: + l, s, d = e.encode(bufsize) + fp.write(d) + if s: + break + if s < 0: + raise IOError("encoder error %d when writing image file" % s) + e.cleanup() + else: + # slight speedup: compress to real file object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o, 0) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + s = e.encode_to_file(fh, bufsize) + if s < 0: + raise IOError("encoder error %d when writing image file" % s) + e.cleanup() + if hasattr(fp, "flush"): + fp.flush() + + +def _safe_read(fp, size): + """ + Reads large blocks in a safe way. Unlike fp.read(n), this function + doesn't trust the user. If the requested size is larger than + SAFEBLOCK, the file is read block by block. + + :param fp: File handle. Must implement a read method. + :param size: Number of bytes to read. + :returns: A string containing up to size bytes of data. + """ + if size <= 0: + return b"" + if size <= SAFEBLOCK: + return fp.read(size) + data = [] + while size > 0: + block = fp.read(min(size, SAFEBLOCK)) + if not block: + break + data.append(block) + size -= len(block) + return b"".join(data) + + +class PyCodecState(object): + def __init__(self): + self.xsize = 0 + self.ysize = 0 + self.xoff = 0 + self.yoff = 0 + + def extents(self): + return (self.xoff, self.yoff, + self.xoff+self.xsize, self.yoff+self.ysize) + + +class PyDecoder(object): + """ + Python implementation of a format decoder. Override this class and + add the decoding logic in the `decode` method. + + See :ref:`Writing Your Own File Decoder in Python` + """ + + _pulls_fd = False + + def __init__(self, mode, *args): + self.im = None + self.state = PyCodecState() + self.fd = None + self.mode = mode + self.init(args) + + def init(self, args): + """ + Override to perform decoder specific initialization + + :param args: Array of args items from the tile entry + :returns: None + """ + self.args = args + + @property + def pulls_fd(self): + return self._pulls_fd + + def decode(self, buffer): + """ + Override to perform the decoding process. + + :param buffer: A bytes object with the data to be decoded. If `handles_eof` + is set, then `buffer` will be empty and `self.fd` will be set. + :returns: A tuple of (bytes consumed, errcode). If finished with decoding + return <0 for the bytes consumed. 
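# Illustrative PyDecoder sketch for a hypothetical format whose payload is
# plain raw RGB bytes: decode() hands the whole buffer to set_as_raw() and
# returns a negative byte count to signal that it is finished. The decoder
# name used for registration is made up for this example.
from PIL import Image, ImageFile

class RawRGBDecoder(ImageFile.PyDecoder):
    def decode(self, buffer):
        self.set_as_raw(bytes(buffer), "RGB")
        return -1, 0            # all data consumed, no error code

Image.register_decoder("raw_rgb_example", RawRGBDecoder)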
Err codes are from `ERRORS` + """ + raise NotImplementedError() + + def cleanup(self): + """ + Override to perform decoder specific cleanup + + :returns: None + """ + pass + + def setfd(self, fd): + """ + Called from ImageFile to set the python file-like object + + :param fd: A python file-like object + :returns: None + """ + self.fd = fd + + def setimage(self, im, extents=None): + """ + Called from ImageFile to set the core output image for the decoder + + :param im: A core image object + :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle + for this tile + :returns: None + """ + + # following c code + self.im = im + + if extents: + (x0, y0, x1, y1) = extents + else: + (x0, y0, x1, y1) = (0, 0, 0, 0) + + if x0 == 0 and x1 == 0: + self.state.xsize, self.state.ysize = self.im.size + else: + self.state.xoff = x0 + self.state.yoff = y0 + self.state.xsize = x1 - x0 + self.state.ysize = y1 - y0 + + if self.state.xsize <= 0 or self.state.ysize <= 0: + raise ValueError("Size cannot be negative") + + if (self.state.xsize + self.state.xoff > self.im.size[0] or + self.state.ysize + self.state.yoff > self.im.size[1]): + raise ValueError("Tile cannot extend outside image") + + def set_as_raw(self, data, rawmode=None): + """ + Convenience method to set the internal image from a stream of raw data + + :param data: Bytes to be set + :param rawmode: The rawmode to be used for the decoder. If not specified, + it will default to the mode of the image + :returns: None + """ + + if not rawmode: + rawmode = self.mode + d = Image._getdecoder(self.mode, 'raw', (rawmode)) + d.setimage(self.im, self.state.extents()) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c1ea8c75f9a2427c2c0ef7c6804521e08f00d52 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.py new file mode 100644 index 0000000000000000000000000000000000000000..e77349df08de5e2f7b23538633be79996bacea6d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.py @@ -0,0 +1,486 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard filters +# +# History: +# 1995-11-27 fl Created +# 2002-06-08 fl Added rank and mode filters +# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2002 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import division + +import functools + +try: + import numpy +except ImportError: # pragma: no cover + numpy = None + + +class Filter(object): + pass + + +class MultibandFilter(Filter): + pass + + +class Kernel(MultibandFilter): + """ + Create a convolution kernel. The current version only + supports 3x3 and 5x5 integer and floating point kernels. + + In the current version, kernels can only be applied to + "L" and "RGB" images. + + :param size: Kernel size, given as (width, height). In the current + version, this must be (3,3) or (5,5). + :param kernel: A sequence containing kernel weights. 
+ :param scale: Scale factor. If given, the result for each pixel is + divided by this value. the default is the sum of the + kernel weights. + :param offset: Offset. If given, this value is added to the result, + after it has been divided by the scale factor. + """ + name = "Kernel" + + def __init__(self, size, kernel, scale=None, offset=0): + if scale is None: + # default scale is sum of kernel + scale = functools.reduce(lambda a, b: a+b, kernel) + if size[0] * size[1] != len(kernel): + raise ValueError("not enough coefficients in kernel") + self.filterargs = size, scale, offset, kernel + + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + return image.filter(*self.filterargs) + + +class BuiltinFilter(Kernel): + def __init__(self): + pass + + +class RankFilter(Filter): + """ + Create a rank filter. The rank filter sorts all pixels in + a window of the given size, and returns the **rank**'th value. + + :param size: The kernel size, in pixels. + :param rank: What pixel value to pick. Use 0 for a min filter, + ``size * size / 2`` for a median filter, ``size * size - 1`` + for a max filter, etc. + """ + name = "Rank" + + def __init__(self, size, rank): + self.size = size + self.rank = rank + + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + image = image.expand(self.size//2, self.size//2) + return image.rankfilter(self.size, self.rank) + + +class MedianFilter(RankFilter): + """ + Create a median filter. Picks the median pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + name = "Median" + + def __init__(self, size=3): + self.size = size + self.rank = size*size//2 + + +class MinFilter(RankFilter): + """ + Create a min filter. Picks the lowest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + name = "Min" + + def __init__(self, size=3): + self.size = size + self.rank = 0 + + +class MaxFilter(RankFilter): + """ + Create a max filter. Picks the largest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + name = "Max" + + def __init__(self, size=3): + self.size = size + self.rank = size*size-1 + + +class ModeFilter(Filter): + """ + Create a mode filter. Picks the most frequent pixel value in a box with the + given size. Pixel values that occur only once or twice are ignored; if no + pixel value occurs more than twice, the original pixel value is preserved. + + :param size: The kernel size, in pixels. + """ + name = "Mode" + + def __init__(self, size=3): + self.size = size + + def filter(self, image): + return image.modefilter(self.size) + + +class GaussianBlur(MultibandFilter): + """Gaussian blur filter. + + :param radius: Blur radius. + """ + name = "GaussianBlur" + + def __init__(self, radius=2): + self.radius = radius + + def filter(self, image): + return image.gaussian_blur(self.radius) + + +class BoxBlur(MultibandFilter): + """Blurs the image by setting each pixel to the average value of the pixels + in a square box extending radius pixels in each direction. + Supports float radius of arbitrary size. Uses an optimized implementation + which runs in linear time relative to the size of the image + for any radius value. + + :param radius: Size of the box in one direction. Radius 0 does not blur, + returns an identical image. Radius 1 takes 1 pixel + in each direction, i.e. 9 pixels in total. 
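+
+    A minimal usage sketch (assuming an already-loaded image ``im``)::
+
+        blurred = im.filter(BoxBlur(radius=2))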
+ """ + name = "BoxBlur" + + def __init__(self, radius): + self.radius = radius + + def filter(self, image): + return image.box_blur(self.radius) + + +class UnsharpMask(MultibandFilter): + """Unsharp mask filter. + + See Wikipedia's entry on `digital unsharp masking`_ for an explanation of + the parameters. + + :param radius: Blur Radius + :param percent: Unsharp strength, in percent + :param threshold: Threshold controls the minimum brightness change that + will be sharpened + + .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking + + """ + name = "UnsharpMask" + + def __init__(self, radius=2, percent=150, threshold=3): + self.radius = radius + self.percent = percent + self.threshold = threshold + + def filter(self, image): + return image.unsharp_mask(self.radius, self.percent, self.threshold) + + +class BLUR(BuiltinFilter): + name = "Blur" + filterargs = (5, 5), 16, 0, ( + 1, 1, 1, 1, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 1, 1, 1, 1 + ) + + +class CONTOUR(BuiltinFilter): + name = "Contour" + filterargs = (3, 3), 1, 255, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1 + ) + + +class DETAIL(BuiltinFilter): + name = "Detail" + filterargs = (3, 3), 6, 0, ( + 0, -1, 0, + -1, 10, -1, + 0, -1, 0 + ) + + +class EDGE_ENHANCE(BuiltinFilter): + name = "Edge-enhance" + filterargs = (3, 3), 2, 0, ( + -1, -1, -1, + -1, 10, -1, + -1, -1, -1 + ) + + +class EDGE_ENHANCE_MORE(BuiltinFilter): + name = "Edge-enhance More" + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 9, -1, + -1, -1, -1 + ) + + +class EMBOSS(BuiltinFilter): + name = "Emboss" + filterargs = (3, 3), 1, 128, ( + -1, 0, 0, + 0, 1, 0, + 0, 0, 0 + ) + + +class FIND_EDGES(BuiltinFilter): + name = "Find Edges" + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1 + ) + + +class SHARPEN(BuiltinFilter): + name = "Sharpen" + filterargs = (3, 3), 16, 0, ( + -2, -2, -2, + -2, 32, -2, + -2, -2, -2 + ) + + +class SMOOTH(BuiltinFilter): + name = "Smooth" + filterargs = (3, 3), 13, 0, ( + 1, 1, 1, + 1, 5, 1, + 1, 1, 1 + ) + + +class SMOOTH_MORE(BuiltinFilter): + name = "Smooth More" + filterargs = (5, 5), 100, 0, ( + 1, 1, 1, 1, 1, + 1, 5, 5, 5, 1, + 1, 5, 44, 5, 1, + 1, 5, 5, 5, 1, + 1, 1, 1, 1, 1 + ) + + +class Color3DLUT(MultibandFilter): + """Three-dimensional color lookup table. + + Transforms 3-channel pixels using the values of the channels as coordinates + in the 3D lookup table and interpolating the nearest elements. + + This method allows you to apply almost any color transformation + in constant time by using pre-calculated decimated tables. + + .. versionadded:: 5.2.0 + + :param size: Size of the table. One int or tuple of (int, int, int). + Minimal size in any dimension is 2, maximum is 65. + :param table: Flat lookup table. A list of ``channels * size**3`` + float elements or a list of ``size**3`` channels-sized + tuples with floats. Channels are changed first, + then first dimension, then second, then third. + Value 0.0 corresponds lowest value of output, 1.0 highest. + :param channels: Number of channels in the table. Could be 3 or 4. + Default is 3. + :param target_mode: A mode for the result image. Should have not less + than ``channels`` channels. Default is ``None``, + which means that mode wouldn't be changed. 
+ """ + name = "Color 3D LUT" + + def __init__(self, size, table, channels=3, target_mode=None, **kwargs): + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + self.size = size = self._check_size(size) + self.channels = channels + self.mode = target_mode + + # Hidden flag `_copy_table=False` could be used to avoid extra copying + # of the table if the table is specially made for the constructor. + copy_table = kwargs.get('_copy_table', True) + items = size[0] * size[1] * size[2] + wrong_size = False + + if numpy and isinstance(table, numpy.ndarray): + if copy_table: + table = table.copy() + + if table.shape in [(items * channels,), (items, channels), + (size[2], size[1], size[0], channels)]: + table = table.reshape(items * channels) + else: + wrong_size = True + + else: + if copy_table: + table = list(table) + + # Convert to a flat list + if table and isinstance(table[0], (list, tuple)): + table, raw_table = [], table + for pixel in raw_table: + if len(pixel) != channels: + raise ValueError( + "The elements of the table should " + "have a length of {}.".format(channels)) + table.extend(pixel) + + if wrong_size or len(table) != items * channels: + raise ValueError( + "The table should have either channels * size**3 float items " + "or size**3 items of channels-sized tuples with floats. " + "Table should be: {}x{}x{}x{}. Actual length: {}".format( + channels, size[0], size[1], size[2], len(table))) + self.table = table + + @staticmethod + def _check_size(size): + try: + _, _, _ = size + except ValueError: + raise ValueError("Size should be either an integer or " + "a tuple of three integers.") + except TypeError: + size = (size, size, size) + size = [int(x) for x in size] + for size1D in size: + if not 2 <= size1D <= 65: + raise ValueError("Size should be in [2, 65] range.") + return size + + @classmethod + def generate(cls, size, callback, channels=3, target_mode=None): + """Generates new LUT using provided callback. + + :param size: Size of the table. Passed to the constructor. + :param callback: Function with three parameters which correspond + three color channels. Will be called ``size**3`` + times with values from 0.0 to 1.0 and should return + a tuple with ``channels`` elements. + :param channels: The number of channels which should return callback. + :param target_mode: Passed to the constructor of the resulting + lookup table. + """ + size1D, size2D, size3D = cls._check_size(size) + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + + table = [0] * (size1D * size2D * size3D * channels) + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + table[idx_out:idx_out + channels] = callback( + r / (size1D-1), g / (size2D-1), b / (size3D-1)) + idx_out += channels + + return cls((size1D, size2D, size3D), table, channels=channels, + target_mode=target_mode, _copy_table=False) + + def transform(self, callback, with_normals=False, channels=None, + target_mode=None): + """Transforms the table values using provided callback and returns + a new LUT with altered values. + + :param callback: A function which takes old lookup table values + and returns a new set of values. The number + of arguments which function should take is + ``self.channels`` or ``3 + self.channels`` + if ``with_normals`` flag is set. + Should return a tuple of ``self.channels`` or + ``channels`` elements if it is set. 
+ :param with_normals: If true, ``callback`` will be called with + coordinates in the color cube as the first + three arguments. Otherwise, ``callback`` + will be called only with actual color values. + :param channels: The number of channels in the resulting lookup table. + :param target_mode: Passed to the constructor of the resulting + lookup table. + """ + if channels not in (None, 3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + ch_in = self.channels + ch_out = channels or ch_in + size1D, size2D, size3D = self.size + + table = [0] * (size1D * size2D * size3D * ch_out) + idx_in = 0 + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + values = self.table[idx_in:idx_in + ch_in] + if with_normals: + values = callback(r / (size1D-1), g / (size2D-1), + b / (size3D-1), *values) + else: + values = callback(*values) + table[idx_out:idx_out + ch_out] = values + idx_in += ch_in + idx_out += ch_out + + return type(self)(self.size, table, channels=ch_out, + target_mode=target_mode or self.mode, + _copy_table=False) + + def __repr__(self): + r = [ + "{} from {}".format(self.__class__.__name__, + self.table.__class__.__name__), + "size={:d}x{:d}x{:d}".format(*self.size), + "channels={:d}".format(self.channels), + ] + if self.mode: + r.append("target_mode={}".format(self.mode)) + return "<{}>".format(" ".join(r)) + + def filter(self, image): + from . import Image + + return image.color_lut_3d( + self.mode or image.mode, Image.LINEAR, self.channels, + self.size[0], self.size[1], self.size[2], self.table) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0440b088b76b1186881f6ce7692cfa0eb0358b8 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFilter.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac29e8f6f95b6475dfb43a1f6635aed997cbb63 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.py @@ -0,0 +1,471 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PIL raster font management +# +# History: +# 1996-08-07 fl created (experimental) +# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3 +# 1999-02-06 fl rewrote most font management stuff in C +# 1999-03-17 fl take pth files into account in load_path (from Richard Jones) +# 2001-02-17 fl added freetype support +# 2001-05-09 fl added TransposedFont wrapper class +# 2002-03-04 fl make sure we have a "L" or "1" font +# 2002-12-04 fl skip non-directory entries in the system path +# 2003-04-29 fl add embedded default font +# 2003-09-27 fl added support for truetype charmap encodings +# +# Todo: +# Adapt to PILFONT2 format (16-bit fonts, compressed, single file) +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._util import isDirectory, isPath, py3 +import os +import sys + +LAYOUT_BASIC = 0 +LAYOUT_RAQM = 1 + + +class _imagingft_not_installed(object): + # module placeholder + def __getattr__(self, id): + raise ImportError("The _imagingft C module is not installed") + + +try: + from . 
import _imagingft as core +except ImportError: + core = _imagingft_not_installed() + + +# FIXME: add support for pilfont2 format (see FontFile.py) + +# -------------------------------------------------------------------- +# Font metrics format: +# "PILfont" LF +# fontdescriptor LF +# (optional) key=value... LF +# "DATA" LF +# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox) +# +# To place a character, cut out srcbox and paste at dstbox, +# relative to the character position. Then move the character +# position according to dx, dy. +# -------------------------------------------------------------------- + + +class ImageFont(object): + "PIL font wrapper" + + def _load_pilfont(self, filename): + + with open(filename, "rb") as fp: + for ext in (".png", ".gif", ".pbm"): + try: + fullname = os.path.splitext(filename)[0] + ext + image = Image.open(fullname) + except: + pass + else: + if image and image.mode in ("1", "L"): + break + else: + raise IOError("cannot find glyph data file") + + self.file = fullname + + return self._load_pilfont_data(fp, image) + + def _load_pilfont_data(self, file, image): + + # read PILfont header + if file.readline() != b"PILfont\n": + raise SyntaxError("Not a PILfont file") + file.readline().split(b";") + self.info = [] # FIXME: should be a dictionary + while True: + s = file.readline() + if not s or s == b"DATA\n": + break + self.info.append(s) + + # read PILfont metrics + data = file.read(256*20) + + # check image + if image.mode not in ("1", "L"): + raise TypeError("invalid font image mode") + + image.load() + + self.font = Image.core.font(image.im, data) + + def getsize(self, text, *args, **kwargs): + return self.font.getsize(text) + + def getmask(self, text, mode="", *args, **kwargs): + return self.font.getmask(text, mode) + + +## +# Wrapper for FreeType fonts. Application code should use the +# truetype factory function to create font objects. 
+ +class FreeTypeFont(object): + "FreeType font wrapper (requires _imagingft service)" + + def __init__(self, font=None, size=10, index=0, encoding="", + layout_engine=None): + # FIXME: use service provider instead + + self.path = font + self.size = size + self.index = index + self.encoding = encoding + + if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM): + layout_engine = LAYOUT_BASIC + if core.HAVE_RAQM: + layout_engine = LAYOUT_RAQM + if layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM: + layout_engine = LAYOUT_BASIC + + self.layout_engine = layout_engine + + if isPath(font): + self.font = core.getfont(font, size, index, encoding, layout_engine=layout_engine) + else: + self.font_bytes = font.read() + self.font = core.getfont( + "", size, index, encoding, self.font_bytes, layout_engine) + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + return text.split(split_character) + + def getname(self): + return self.font.family, self.font.style + + def getmetrics(self): + return self.font.ascent, self.font.descent + + def getsize(self, text, direction=None, features=None): + size, offset = self.font.getsize(text, direction, features) + return (size[0] + offset[0], size[1] + offset[1]) + + def getsize_multiline(self, text, direction=None, spacing=4, features=None): + max_width = 0 + lines = self._multiline_split(text) + line_spacing = self.getsize('A')[1] + spacing + for line in lines: + line_width, line_height = self.getsize(line, direction, features) + max_width = max(max_width, line_width) + + return max_width, len(lines)*line_spacing - spacing + + def getoffset(self, text): + return self.font.getsize(text)[1] + + def getmask(self, text, mode="", direction=None, features=None): + return self.getmask2(text, mode, direction=direction, features=features)[0] + + def getmask2(self, text, mode="", fill=Image.core.fill, direction=None, features=None, *args, **kwargs): + size, offset = self.font.getsize(text, direction, features) + im = fill("L", size, 0) + self.font.render(text, im.id, mode == "1", direction, features) + return im, offset + + def font_variant(self, font=None, size=None, index=None, encoding=None, + layout_engine=None): + """ + Create a copy of this FreeTypeFont object, + using any specified arguments to override the settings. + + Parameters are identical to the parameters used to initialize this + object. + + :return: A FreeTypeFont object. + """ + return FreeTypeFont(font=self.path if font is None else font, + size=self.size if size is None else size, + index=self.index if index is None else index, + encoding=self.encoding if encoding is None else encoding, + layout_engine=self.layout_engine if layout_engine is None else layout_engine + ) + + +class TransposedFont(object): + "Wrapper for writing rotated or mirrored text" + + def __init__(self, font, orientation=None): + """ + Wrapper that creates a transposed font from any existing font + object. + + :param font: A font object. + :param orientation: An optional orientation. If given, this should + be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM, + Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270. 
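+
+        A minimal usage sketch (``font`` is assumed to be an existing font
+        object)::
+
+            vertical_font = TransposedFont(font, orientation=Image.ROTATE_90)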
+ """ + self.font = font + self.orientation = orientation # any 'transpose' argument, or None + + def getsize(self, text, *args, **kwargs): + w, h = self.font.getsize(text) + if self.orientation in (Image.ROTATE_90, Image.ROTATE_270): + return h, w + return w, h + + def getmask(self, text, mode="", *args, **kwargs): + im = self.font.getmask(text, mode, *args, **kwargs) + if self.orientation is not None: + return im.transpose(self.orientation) + return im + + +def load(filename): + """ + Load a font file. This function loads a font object from the given + bitmap font file, and returns the corresponding font object. + + :param filename: Name of font file. + :return: A font object. + :exception IOError: If the file could not be read. + """ + f = ImageFont() + f._load_pilfont(filename) + return f + + +def truetype(font=None, size=10, index=0, encoding="", + layout_engine=None): + """ + Load a TrueType or OpenType font from a file or file-like object, + and create a font object. + This function loads a font object from the given file or file-like + object, and creates a font object for a font of the given size. + + This function requires the _imagingft service. + + :param font: A filename or file-like object containing a TrueType font. + Under Windows, if the file is not found in this filename, + the loader also looks in Windows :file:`fonts/` directory. + :param size: The requested size, in points. + :param index: Which font face to load (default is first available face). + :param encoding: Which font encoding to use (default is Unicode). Common + encodings are "unic" (Unicode), "symb" (Microsoft + Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert), + and "armn" (Apple Roman). See the FreeType documentation + for more information. + :param layout_engine: Which layout engine to use, if available: + `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`. + :return: A font object. + :exception IOError: If the file could not be read. 
+ """ + + try: + return FreeTypeFont(font, size, index, encoding, layout_engine) + except IOError: + ttf_filename = os.path.basename(font) + + dirs = [] + if sys.platform == "win32": + # check the windows font repository + # NOTE: must use uppercase WINDIR, to work around bugs in + # 1.5.2's os.environ.get() + windir = os.environ.get("WINDIR") + if windir: + dirs.append(os.path.join(windir, "fonts")) + elif sys.platform in ('linux', 'linux2'): + lindirs = os.environ.get("XDG_DATA_DIRS", "") + if not lindirs: + # According to the freedesktop spec, XDG_DATA_DIRS should + # default to /usr/share + lindirs = '/usr/share' + dirs += [os.path.join(lindir, "fonts") + for lindir in lindirs.split(":")] + elif sys.platform == 'darwin': + dirs += ['/Library/Fonts', '/System/Library/Fonts', + os.path.expanduser('~/Library/Fonts')] + + ext = os.path.splitext(ttf_filename)[1] + first_font_with_a_different_extension = None + for directory in dirs: + for walkroot, walkdir, walkfilenames in os.walk(directory): + for walkfilename in walkfilenames: + if ext and walkfilename == ttf_filename: + fontpath = os.path.join(walkroot, walkfilename) + return FreeTypeFont(fontpath, size, index, encoding, layout_engine) + elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: + fontpath = os.path.join(walkroot, walkfilename) + if os.path.splitext(fontpath)[1] == '.ttf': + return FreeTypeFont(fontpath, size, index, encoding, layout_engine) + if not ext and first_font_with_a_different_extension is None: + first_font_with_a_different_extension = fontpath + if first_font_with_a_different_extension: + return FreeTypeFont(first_font_with_a_different_extension, size, + index, encoding, layout_engine) + raise + + +def load_path(filename): + """ + Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a + bitmap font along the Python path. + + :param filename: Name of font file. + :return: A font object. + :exception IOError: If the file could not be read. + """ + for directory in sys.path: + if isDirectory(directory): + if not isinstance(filename, str): + if py3: + filename = filename.decode("utf-8") + else: + filename = filename.encode("utf-8") + try: + return load(os.path.join(directory, filename)) + except IOError: + pass + raise IOError("cannot find font file") + + +def load_default(): + """Load a "better than nothing" default font. + + .. versionadded:: 1.1.4 + + :return: A font object. 
+ """ + from io import BytesIO + import base64 + f = ImageFont() + f._load_pilfont_data( + # courB08 + BytesIO(base64.b64decode(b''' +UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA +BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL +AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA +AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB +ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A +BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB +//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA +AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH +AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA +ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv +AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/ +/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5 +AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA +AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG +AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA +BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA +AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA +2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF +AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA//// ++gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA +////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA +BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv +AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA +AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA +AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA +BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP// +//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA +AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF +AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB +mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn +AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA +AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7 
+AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA +Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB +//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA +AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ +AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC +DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ +AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/ ++wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5 +AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/ +///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG +AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA +BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA +Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC +eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG +AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA//// ++gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA +////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA +BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT +AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A +AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA +Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA +Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP// +//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA +AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ +AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA +LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5 +AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA +AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5 +AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA +AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG +AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA +EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK +AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA 
+pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG +AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA//// ++QAGAAIAzgAKANUAEw== +''')), Image.open(BytesIO(base64.b64decode(b''' +iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u +Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9 +M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g +LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F +IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA +Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791 +NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx +in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9 +SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY +AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt +y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG +ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY +lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H +/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3 +AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47 +c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/ +/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw +pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv +oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR +evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA +AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v// +Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR +w7IkEbzhVQAAAABJRU5ErkJggg== +''')))) + return f diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00ab6667fec149af2c9ca0db4d750a2a7f853585 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageFont.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.py new file mode 100644 index 0000000000000000000000000000000000000000..712b02cd0d983cf139fcb6aec2bda5a9b9f7e15b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.py @@ -0,0 +1,80 @@ +# +# The Python Imaging Library +# $Id$ +# +# screen grabber (macOS and Windows only) +# +# History: +# 2001-04-26 fl created +# 2001-09-17 fl use builtin driver, if present +# 2002-11-19 fl added grabclipboard support +# +# Copyright (c) 2001-2002 by Secret Labs AB +# Copyright (c) 2001-2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . 
import Image + +import sys +if sys.platform not in ["win32", "darwin"]: + raise ImportError("ImageGrab is macOS and Windows only") + +if sys.platform == "win32": + grabber = Image.core.grabscreen +elif sys.platform == "darwin": + import os + import tempfile + import subprocess + + +def grab(bbox=None): + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp('.png') + os.close(fh) + subprocess.call(['screencapture', '-x', filepath]) + im = Image.open(filepath) + im.load() + os.unlink(filepath) + else: + size, data = grabber() + im = Image.frombytes( + "RGB", size, data, + # RGB, 32-bit line padding, origin lower left corner + "raw", "BGR", (size[0]*3 + 3) & -4, -1 + ) + if bbox: + im = im.crop(bbox) + return im + + +def grabclipboard(): + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp('.jpg') + os.close(fh) + commands = [ + "set theFile to (open for access POSIX file \""+filepath+"\" with write permission)", + "try", + "write (the clipboard as JPEG picture) to theFile", + "end try", + "close access theFile" + ] + script = ["osascript"] + for command in commands: + script += ["-e", command] + subprocess.call(script) + + im = None + if os.stat(filepath).st_size != 0: + im = Image.open(filepath) + im.load() + os.unlink(filepath) + return im + else: + data = Image.core.grabclipboard() + if isinstance(data, bytes): + from . import BmpImagePlugin + import io + return BmpImagePlugin.DibImageFile(io.BytesIO(data)) + return data diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b191eb168236503c3354b38c59963a5c2a8c0ad9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageGrab.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.py new file mode 100644 index 0000000000000000000000000000000000000000..d985877a6c851c996c6b19e7aa0a5f950e6100a8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.py @@ -0,0 +1,271 @@ +# +# The Python Imaging Library +# $Id$ +# +# a simple math add-on for the Python Imaging Library +# +# History: +# 1999-02-15 fl Original PIL Plus release +# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 +# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 +# +# Copyright (c) 1999-2005 by Secret Labs AB +# Copyright (c) 2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, _imagingmath +from ._util import py3 + +try: + import builtins +except ImportError: + import __builtin__ + builtins = __builtin__ + +VERBOSE = 0 + + +def _isconstant(v): + return isinstance(v, int) or isinstance(v, float) + + +class _Operand(object): + """Wraps an image operand, providing standard operators""" + + def __init__(self, im): + self.im = im + + def __fixup(self, im1): + # convert image to suitable mode + if isinstance(im1, _Operand): + # argument was an image. 
+ if im1.im.mode in ("1", "L"): + return im1.im.convert("I") + elif im1.im.mode in ("I", "F"): + return im1.im + else: + raise ValueError("unsupported mode: %s" % im1.im.mode) + else: + # argument was a constant + if _isconstant(im1) and self.im.mode in ("1", "L", "I"): + return Image.new("I", self.im.size, im1) + else: + return Image.new("F", self.im.size, im1) + + def apply(self, op, im1, im2=None, mode=None): + im1 = self.__fixup(im1) + if im2 is None: + # unary operation + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + try: + op = getattr(_imagingmath, op+"_"+im1.mode) + except AttributeError: + raise TypeError("bad operand type for '%s'" % op) + _imagingmath.unop(op, out.im.id, im1.im.id) + else: + # binary operation + im2 = self.__fixup(im2) + if im1.mode != im2.mode: + # convert both arguments to floating point + if im1.mode != "F": + im1 = im1.convert("F") + if im2.mode != "F": + im2 = im2.convert("F") + if im1.mode != im2.mode: + raise ValueError("mode mismatch") + if im1.size != im2.size: + # crop both arguments to a common size + size = (min(im1.size[0], im2.size[0]), + min(im1.size[1], im2.size[1])) + if im1.size != size: + im1 = im1.crop((0, 0) + size) + if im2.size != size: + im2 = im2.crop((0, 0) + size) + out = Image.new(mode or im1.mode, size, None) + else: + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + im2.load() + try: + op = getattr(_imagingmath, op+"_"+im1.mode) + except AttributeError: + raise TypeError("bad operand type for '%s'" % op) + _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) + return _Operand(out) + + # unary operators + def __bool__(self): + # an image is "true" if it contains at least one non-zero pixel + return self.im.getbbox() is not None + + if not py3: + # Provide __nonzero__ for pre-Py3k + __nonzero__ = __bool__ + del __bool__ + + def __abs__(self): + return self.apply("abs", self) + + def __pos__(self): + return self + + def __neg__(self): + return self.apply("neg", self) + + # binary operators + def __add__(self, other): + return self.apply("add", self, other) + + def __radd__(self, other): + return self.apply("add", other, self) + + def __sub__(self, other): + return self.apply("sub", self, other) + + def __rsub__(self, other): + return self.apply("sub", other, self) + + def __mul__(self, other): + return self.apply("mul", self, other) + + def __rmul__(self, other): + return self.apply("mul", other, self) + + def __truediv__(self, other): + return self.apply("div", self, other) + + def __rtruediv__(self, other): + return self.apply("div", other, self) + + def __mod__(self, other): + return self.apply("mod", self, other) + + def __rmod__(self, other): + return self.apply("mod", other, self) + + def __pow__(self, other): + return self.apply("pow", self, other) + + def __rpow__(self, other): + return self.apply("pow", other, self) + + if not py3: + # Provide __div__ and __rdiv__ for pre-Py3k + __div__ = __truediv__ + __rdiv__ = __rtruediv__ + del __truediv__ + del __rtruediv__ + + # bitwise + def __invert__(self): + return self.apply("invert", self) + + def __and__(self, other): + return self.apply("and", self, other) + + def __rand__(self, other): + return self.apply("and", other, self) + + def __or__(self, other): + return self.apply("or", self, other) + + def __ror__(self, other): + return self.apply("or", other, self) + + def __xor__(self, other): + return self.apply("xor", self, other) + + def __rxor__(self, other): + return self.apply("xor", other, self) + + def __lshift__(self, other): + return 
self.apply("lshift", self, other) + + def __rshift__(self, other): + return self.apply("rshift", self, other) + + # logical + def __eq__(self, other): + return self.apply("eq", self, other) + + def __ne__(self, other): + return self.apply("ne", self, other) + + def __lt__(self, other): + return self.apply("lt", self, other) + + def __le__(self, other): + return self.apply("le", self, other) + + def __gt__(self, other): + return self.apply("gt", self, other) + + def __ge__(self, other): + return self.apply("ge", self, other) + + +# conversions +def imagemath_int(self): + return _Operand(self.im.convert("I")) + + +def imagemath_float(self): + return _Operand(self.im.convert("F")) + + +# logical +def imagemath_equal(self, other): + return self.apply("eq", self, other, mode="I") + + +def imagemath_notequal(self, other): + return self.apply("ne", self, other, mode="I") + + +def imagemath_min(self, other): + return self.apply("min", self, other) + + +def imagemath_max(self, other): + return self.apply("max", self, other) + + +def imagemath_convert(self, mode): + return _Operand(self.im.convert(mode)) + + +ops = {} +for k, v in list(globals().items()): + if k[:10] == "imagemath_": + ops[k[10:]] = v + + +def eval(expression, _dict={}, **kw): + """ + Evaluates an image expression. + + :param expression: A string containing a Python-style expression. + :param options: Values to add to the evaluation context. You + can either use a dictionary, or one or more keyword + arguments. + :return: The evaluated expression. This is usually an image object, but can + also be an integer, a floating point value, or a pixel tuple, + depending on the expression. + """ + + # build execution namespace + args = ops.copy() + args.update(_dict) + args.update(kw) + for k, v in list(args.items()): + if hasattr(v, "im"): + args[k] = _Operand(v) + + out = builtins.eval(expression, args) + try: + return out.im + except AttributeError: + return out diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e0765bd1ea2262a2c9c8fab23705c4df284d4b5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMath.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.py new file mode 100644 index 0000000000000000000000000000000000000000..b227f2127755277c83e47973d269a7fbb61fa0c0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.py @@ -0,0 +1,55 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard mode descriptors +# +# History: +# 2006-03-20 fl Added +# +# Copyright (c) 2006 by Secret Labs AB. +# Copyright (c) 2006 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +# mode descriptor cache +_modes = None + + +class ModeDescriptor(object): + """Wrapper for mode strings.""" + + def __init__(self, mode, bands, basemode, basetype): + self.mode = mode + self.bands = bands + self.basemode = basemode + self.basetype = basetype + + def __str__(self): + return self.mode + + +def getmode(mode): + """Gets a mode descriptor for the given mode.""" + global _modes + if not _modes: + # initialize mode cache + + from . 
import Image + modes = {} + # core modes + for m, (basemode, basetype, bands) in Image._MODEINFO.items(): + modes[m] = ModeDescriptor(m, bands, basemode, basetype) + # extra experimental modes + modes["RGBa"] = ModeDescriptor("RGBa", ("R", "G", "B", "a"), "RGB", "L") + modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L") + modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L") + modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L") + # mapping modes + modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L") + modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L") + modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L") + # set global mode cache atomically + _modes = modes + return _modes[mode] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b1c68074d58072438b8398e7526571f8fa1b29a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMode.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.py new file mode 100644 index 0000000000000000000000000000000000000000..579ee4e1a2a053b726b1f2ef30bf610e2733728b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.py @@ -0,0 +1,250 @@ +# A binary morphology add-on for the Python Imaging Library +# +# History: +# 2014-06-04 Initial version. +# +# Copyright (c) 2014 Dov Grobgeld + +from __future__ import print_function + +from . import Image, _imagingmorph +import re + +LUT_SIZE = 1 << 9 + + +class LutBuilder(object): + """A class for building a MorphLut from a descriptive language + + The input patterns is a list of a strings sequences like these:: + + 4:(... + .1. + 111)->1 + + (whitespaces including linebreaks are ignored). The option 4 + describes a series of symmetry operations (in this case a + 4-rotation), the pattern is described by: + + - . or X - Ignore + - 1 - Pixel is on + - 0 - Pixel is off + + The result of the operation is described after "->" string. + + The default is to return the current pixel value, which is + returned if no other match is found. + + Operations: + + - 4 - 4 way rotation + - N - Negate + - 1 - Dummy op for no other operation (an op must always be given) + - M - Mirroring + + Example:: + + lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) + lut = lb.build_lut() + + """ + def __init__(self, patterns=None, op_name=None): + if patterns is not None: + self.patterns = patterns + else: + self.patterns = [] + self.lut = None + if op_name is not None: + known_patterns = { + 'corner': ['1:(... ... ...)->0', + '4:(00. 01. ...)->1'], + 'dilation4': ['4:(... .0. .1.)->1'], + 'dilation8': ['4:(... .0. .1.)->1', + '4:(... .0. ..1)->1'], + 'erosion4': ['4:(... .1. .0.)->0'], + 'erosion8': ['4:(... .1. .0.)->0', + '4:(... .1. ..0)->0'], + 'edge': ['1:(... ... ...)->0', + '4:(.0. .1. ...)->1', + '4:(01. .1. 
...)->1'] + } + if op_name not in known_patterns: + raise Exception('Unknown pattern '+op_name+'!') + + self.patterns = known_patterns[op_name] + + def add_patterns(self, patterns): + self.patterns += patterns + + def build_default_lut(self): + symbols = [0, 1] + m = 1 << 4 # pos of current pixel + self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) + + def get_lut(self): + return self.lut + + def _string_permute(self, pattern, permutation): + """string_permute takes a pattern and a permutation and returns the + string permuted according to the permutation list. + """ + assert(len(permutation) == 9) + return ''.join(pattern[p] for p in permutation) + + def _pattern_permute(self, basic_pattern, options, basic_result): + """pattern_permute takes a basic pattern and its result and clones + the pattern according to the modifications described in the $options + parameter. It returns a list of all cloned patterns.""" + patterns = [(basic_pattern, basic_result)] + + # rotations + if '4' in options: + res = patterns[-1][1] + for i in range(4): + patterns.append( + (self._string_permute(patterns[-1][0], [6, 3, 0, + 7, 4, 1, + 8, 5, 2]), res)) + # mirror + if 'M' in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + patterns.append( + (self._string_permute(pattern, [2, 1, 0, + 5, 4, 3, + 8, 7, 6]), res)) + + # negate + if 'N' in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + # Swap 0 and 1 + pattern = (pattern + .replace('0', 'Z') + .replace('1', '0') + .replace('Z', '1')) + res = 1-int(res) + patterns.append((pattern, res)) + + return patterns + + def build_lut(self): + """Compile all patterns into a morphology lut. + + TBD :Build based on (file) morphlut:modify_lut + """ + self.build_default_lut() + patterns = [] + + # Parse and create symmetries of the patterns strings + for p in self.patterns: + m = re.search( + r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', '')) + if not m: + raise Exception('Syntax error in pattern "'+p+'"') + options = m.group(1) + pattern = m.group(2) + result = int(m.group(3)) + + # Get rid of spaces + pattern = pattern.replace(' ', '').replace('\n', '') + + patterns += self._pattern_permute(pattern, options, result) + +# # Debugging +# for p, r in patterns: +# print(p, r) +# print('--') + + # compile the patterns into regular expressions for speed + for i, pattern in enumerate(patterns): + p = pattern[0].replace('.', 'X').replace('X', '[01]') + p = re.compile(p) + patterns[i] = (p, pattern[1]) + + # Step through table and find patterns that match. + # Note that all the patterns are searched. 
The last one + # caught overrides + for i in range(LUT_SIZE): + # Build the bit pattern + bitpattern = bin(i)[2:] + bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1] + + for p, r in patterns: + if p.match(bitpattern): + self.lut[i] = [0, 1][r] + + return self.lut + + +class MorphOp(object): + """A class for binary morphological operators""" + + def __init__(self, + lut=None, + op_name=None, + patterns=None): + """Create a binary morphological operator""" + self.lut = lut + if op_name is not None: + self.lut = LutBuilder(op_name=op_name).build_lut() + elif patterns is not None: + self.lut = LutBuilder(patterns=patterns).build_lut() + + def apply(self, image): + """Run a single morphological operation on an image + + Returns a tuple of the number of changed pixels and the + morphed image""" + if self.lut is None: + raise Exception('No operator loaded') + + if image.mode != 'L': + raise Exception('Image must be binary, meaning it must use mode L') + outimage = Image.new(image.mode, image.size, None) + count = _imagingmorph.apply( + bytes(self.lut), image.im.id, outimage.im.id) + return count, outimage + + def match(self, image): + """Get a list of coordinates matching the morphological operation on + an image. + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + if self.lut is None: + raise Exception('No operator loaded') + + if image.mode != 'L': + raise Exception('Image must be binary, meaning it must use mode L') + return _imagingmorph.match(bytes(self.lut), image.im.id) + + def get_on_pixels(self, image): + """Get a list of all turned on pixels in a binary image + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + + if image.mode != 'L': + raise Exception('Image must be binary, meaning it must use mode L') + return _imagingmorph.get_on_pixels(image.im.id) + + def load_lut(self, filename): + """Load an operator from an mrl file""" + with open(filename, 'rb') as f: + self.lut = bytearray(f.read()) + + if len(self.lut) != LUT_SIZE: + self.lut = None + raise Exception('Wrong size operator file!') + + def save_lut(self, filename): + """Save an operator to an mrl file""" + if self.lut is None: + raise Exception('No operator loaded') + with open(filename, 'wb') as f: + f.write(self.lut) + + def set_lut(self, lut): + """Set the lut from an external source""" + self.lut = lut diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.pyc new file mode 100644 index 0000000000000000000000000000000000000000..709dabb4e8f20f0abcf12698c6298d7cc6d5e5f5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageMorph.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.py new file mode 100644 index 0000000000000000000000000000000000000000..25d491affa45f2007c161737368dc0a82111193f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.py @@ -0,0 +1,529 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# standard image operations +# +# History: +# 2001-10-20 fl Created +# 2001-10-23 fl Added autocontrast operator +# 2001-12-18 fl Added Kevin's fit operator +# 2004-03-14 fl Fixed potential division by zero in equalize +# 2005-05-05 fl Fixed equalize for low number of values +# +# Copyright (c) 2001-2004 by Secret Labs AB +# Copyright (c) 2001-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._util import isStringType +import operator +import functools +import warnings + + +# +# helpers + +def _border(border): + if isinstance(border, tuple): + if len(border) == 2: + left, top = right, bottom = border + elif len(border) == 4: + left, top, right, bottom = border + else: + left = top = right = bottom = border + return left, top, right, bottom + + +def _color(color, mode): + if isStringType(color): + from . import ImageColor + color = ImageColor.getcolor(color, mode) + return color + + +def _lut(image, lut): + if image.mode == "P": + # FIXME: apply to lookup table, not image data + raise NotImplementedError("mode P support coming soon") + elif image.mode in ("L", "RGB"): + if image.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return image.point(lut) + else: + raise IOError("not supported for this image mode") + +# +# actions + + +def autocontrast(image, cutoff=0, ignore=None): + """ + Maximize (normalize) image contrast. This function calculates a + histogram of the input image, removes **cutoff** percent of the + lightest and darkest pixels from the histogram, and remaps the image + so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + :param image: The image to process. + :param cutoff: How many percent to cut off from the histogram. + :param ignore: The background pixel value (use None for no background). + :return: An image. + """ + histogram = image.histogram() + lut = [] + for layer in range(0, len(histogram), 256): + h = histogram[layer:layer+256] + if ignore is not None: + # get rid of outliers + try: + h[ignore] = 0 + except TypeError: + # assume sequence + for ix in ignore: + h[ix] = 0 + if cutoff: + # cut off pixels from both ends of the histogram + # get number of pixels + n = 0 + for ix in range(256): + n = n + h[ix] + # remove cutoff% pixels from the low end + cut = n * cutoff // 100 + for lo in range(256): + if cut > h[lo]: + cut = cut - h[lo] + h[lo] = 0 + else: + h[lo] -= cut + cut = 0 + if cut <= 0: + break + # remove cutoff% samples from the hi end + cut = n * cutoff // 100 + for hi in range(255, -1, -1): + if cut > h[hi]: + cut = cut - h[hi] + h[hi] = 0 + else: + h[hi] -= cut + cut = 0 + if cut <= 0: + break + # find lowest/highest samples after preprocessing + for lo in range(256): + if h[lo]: + break + for hi in range(255, -1, -1): + if h[hi]: + break + if hi <= lo: + # don't bother + lut.extend(list(range(256))) + else: + scale = 255.0 / (hi - lo) + offset = -lo * scale + for ix in range(256): + ix = int(ix * scale + offset) + if ix < 0: + ix = 0 + elif ix > 255: + ix = 255 + lut.append(ix) + return _lut(image, lut) + + +def colorize(image, black, white): + """ + Colorize grayscale image. The **black** and **white** + arguments should be RGB tuples; this function calculates a color + wedge mapping all black pixels in the source image to the first + color, and all white pixels to the second color. + + :param image: The image to colorize. + :param black: The color to use for black input pixels. 
+ :param white: The color to use for white input pixels. + :return: An image. + """ + assert image.mode == "L" + black = _color(black, "RGB") + white = _color(white, "RGB") + red = [] + green = [] + blue = [] + for i in range(256): + red.append(black[0]+i*(white[0]-black[0])//255) + green.append(black[1]+i*(white[1]-black[1])//255) + blue.append(black[2]+i*(white[2]-black[2])//255) + image = image.convert("RGB") + return _lut(image, red + green + blue) + + +def crop(image, border=0): + """ + Remove border from image. The same amount of pixels are removed + from all four sides. This function works on all image modes. + + .. seealso:: :py:meth:`~PIL.Image.Image.crop` + + :param image: The image to crop. + :param border: The number of pixels to remove. + :return: An image. + """ + left, top, right, bottom = _border(border) + return image.crop( + (left, top, image.size[0]-right, image.size[1]-bottom) + ) + + +def scale(image, factor, resample=Image.NEAREST): + """ + Returns a rescaled image by a specific factor given in parameter. + A factor greater than 1 expands the image, between 0 and 1 contracts the + image. + + :param image: The image to rescale. + :param factor: The expansion factor, as a float. + :param resample: An optional resampling filter. Same values possible as + in the PIL.Image.resize function. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + if factor == 1: + return image.copy() + elif factor <= 0: + raise ValueError("the factor must be greater than 0") + else: + size = (int(round(factor * image.width)), + int(round(factor * image.height))) + return image.resize(size, resample) + + +def deform(image, deformer, resample=Image.BILINEAR): + """ + Deform the image. + + :param image: The image to deform. + :param deformer: A deformer object. Any object that implements a + **getmesh** method can be used. + :param resample: An optional resampling filter. Same values possible as + in the PIL.Image.transform function. + :return: An image. + """ + return image.transform( + image.size, Image.MESH, deformer.getmesh(image), resample + ) + + +def equalize(image, mask=None): + """ + Equalize the image histogram. This function applies a non-linear + mapping to the input image, in order to create a uniform + distribution of grayscale values in the output image. + + :param image: The image to equalize. + :param mask: An optional mask. If given, only the pixels selected by + the mask are included in the analysis. + :return: An image. + """ + if image.mode == "P": + image = image.convert("RGB") + h = image.histogram(mask) + lut = [] + for b in range(0, len(h), 256): + histo = [_f for _f in h[b:b+256] if _f] + if len(histo) <= 1: + lut.extend(list(range(256))) + else: + step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 + if not step: + lut.extend(list(range(256))) + else: + n = step // 2 + for i in range(256): + lut.append(n // step) + n = n + h[i+b] + return _lut(image, lut) + + +def expand(image, border=0, fill=0): + """ + Add border to the image + + :param image: The image to expand. + :param border: Border width, in pixels. + :param fill: Pixel fill value (a color value). Default is 0 (black). + :return: An image. 
+ """ + left, top, right, bottom = _border(border) + width = left + image.size[0] + right + height = top + image.size[1] + bottom + out = Image.new(image.mode, (width, height), _color(fill, image.mode)) + out.paste(image, (left, top)) + return out + + +def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)): + """ + Returns a sized and cropped version of the image, cropped to the + requested aspect ratio and size. + + This function was contributed by Kevin Cazabon. + + :param image: The image to size and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: What resampling method to use. Default is + :py:attr:`PIL.Image.NEAREST`. + :param bleed: Remove a border around the outside of the image (from all + four edges. The value is a decimal percentage (use 0.01 for + one percent). The default value is 0 (no border). + :param centering: Control the cropping position. Use (0.5, 0.5) for + center cropping (e.g. if cropping the width, take 50% off + of the left side, and therefore 50% off the right side). + (0.0, 0.0) will crop from the top left corner (i.e. if + cropping the width, take all of the crop off of the right + side, and if cropping the height, take all of it off the + bottom). (1.0, 0.0) will crop from the bottom left + corner, etc. (i.e. if cropping the width, take all of the + crop off the left side, and if cropping the height take + none from the top, and therefore all off the bottom). + :return: An image. + """ + + # by Kevin Cazabon, Feb 17/2000 + # kevin@cazabon.com + # http://www.cazabon.com + + # ensure inputs are valid + if not isinstance(centering, list): + centering = [centering[0], centering[1]] + + if centering[0] > 1.0 or centering[0] < 0.0: + centering[0] = 0.50 + if centering[1] > 1.0 or centering[1] < 0.0: + centering[1] = 0.50 + + if bleed > 0.49999 or bleed < 0.0: + bleed = 0.0 + + # calculate the area to use for resizing and cropping, subtracting + # the 'bleed' around the edges + + # number of pixels to trim off on Top and Bottom, Left and Right + bleedPixels = ( + int((float(bleed) * float(image.size[0])) + 0.5), + int((float(bleed) * float(image.size[1])) + 0.5) + ) + + liveArea = (0, 0, image.size[0], image.size[1]) + if bleed > 0.0: + liveArea = ( + bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1, + image.size[1] - bleedPixels[1] - 1 + ) + + liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1]) + + # calculate the aspect ratio of the liveArea + liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1]) + + # calculate the aspect ratio of the output image + aspectRatio = float(size[0]) / float(size[1]) + + # figure out if the sides or top/bottom will be cropped off + if liveAreaAspectRatio >= aspectRatio: + # liveArea is wider than what's needed, crop the sides + cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5) + cropHeight = liveSize[1] + else: + # liveArea is taller than what's needed, crop the top and bottom + cropWidth = liveSize[0] + cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5) + + # make the crop + leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0])) + if leftSide < 0: + leftSide = 0 + topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1])) + if topSide < 0: + topSide = 0 + + out = image.crop( + (leftSide, topSide, leftSide + cropWidth, topSide + cropHeight) + ) + + # resize the image and return it + return out.resize(size, method) + + +def flip(image): + """ + Flip the image 
vertically (top to bottom). + + :param image: The image to flip. + :return: An image. + """ + return image.transpose(Image.FLIP_TOP_BOTTOM) + + +def grayscale(image): + """ + Convert the image to grayscale. + + :param image: The image to convert. + :return: An image. + """ + return image.convert("L") + + +def invert(image): + """ + Invert (negate) the image. + + :param image: The image to invert. + :return: An image. + """ + lut = [] + for i in range(256): + lut.append(255-i) + return _lut(image, lut) + + +def mirror(image): + """ + Flip image horizontally (left to right). + + :param image: The image to mirror. + :return: An image. + """ + return image.transpose(Image.FLIP_LEFT_RIGHT) + + +def posterize(image, bits): + """ + Reduce the number of bits for each color channel. + + :param image: The image to posterize. + :param bits: The number of bits to keep for each channel (1-8). + :return: An image. + """ + lut = [] + mask = ~(2**(8-bits)-1) + for i in range(256): + lut.append(i & mask) + return _lut(image, lut) + + +def solarize(image, threshold=128): + """ + Invert all pixel values above a threshold. + + :param image: The image to solarize. + :param threshold: All pixels above this greyscale level are inverted. + :return: An image. + """ + lut = [] + for i in range(256): + if i < threshold: + lut.append(i) + else: + lut.append(255-i) + return _lut(image, lut) + + +# -------------------------------------------------------------------- +# PIL USM components, from Kevin Cazabon. + +def gaussian_blur(im, radius=None): + """ PIL_usm.gblur(im, [radius])""" + + warnings.warn( + 'PIL.ImageOps.gaussian_blur is deprecated. ' + 'Use PIL.ImageFilter.GaussianBlur instead. ' + 'This function will be removed in a future version.', + DeprecationWarning + ) + + if radius is None: + radius = 5.0 + + im.load() + + return im.im.gaussian_blur(radius) + + +def gblur(im, radius=None): + """ PIL_usm.gblur(im, [radius])""" + + warnings.warn( + 'PIL.ImageOps.gblur is deprecated. ' + 'Use PIL.ImageFilter.GaussianBlur instead. ' + 'This function will be removed in a future version.', + DeprecationWarning + ) + + return gaussian_blur(im, radius) + + +def unsharp_mask(im, radius=None, percent=None, threshold=None): + """ PIL_usm.usm(im, [radius, percent, threshold])""" + + warnings.warn( + 'PIL.ImageOps.unsharp_mask is deprecated. ' + 'Use PIL.ImageFilter.UnsharpMask instead. ' + 'This function will be removed in a future version.', + DeprecationWarning + ) + + if radius is None: + radius = 5.0 + if percent is None: + percent = 150 + if threshold is None: + threshold = 3 + + im.load() + + return im.im.unsharp_mask(radius, percent, threshold) + + +def usm(im, radius=None, percent=None, threshold=None): + """ PIL_usm.usm(im, [radius, percent, threshold])""" + + warnings.warn( + 'PIL.ImageOps.usm is deprecated. ' + 'Use PIL.ImageFilter.UnsharpMask instead. ' + 'This function will be removed in a future version.', + DeprecationWarning + ) + + return unsharp_mask(im, radius, percent, threshold) + + +def box_blur(image, radius): + """ + Blur the image by setting each pixel to the average value of the pixels + in a square box extending radius pixels in each direction. + Supports float radius of arbitrary size. Uses an optimized implementation + which runs in linear time relative to the size of the image + for any radius value. + + :param image: The image to blur. + :param radius: Size of the box in one direction. Radius 0 does not blur, + returns an identical image. Radius 1 takes 1 pixel + in each direction, i.e. 
9 pixels in total. + :return: An image. + """ + warnings.warn( + 'PIL.ImageOps.box_blur is deprecated. ' + 'Use PIL.ImageFilter.BoxBlur instead. ' + 'This function will be removed in a future version.', + DeprecationWarning + ) + + image.load() + + return image._new(image.im.box_blur(radius)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d81f851c15864d81457ed21cfc428bbdb354f5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageOps.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.py new file mode 100644 index 0000000000000000000000000000000000000000..cecc6458387fa3f331f3fd571111496cef0644dc --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.py @@ -0,0 +1,216 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image palette object +# +# History: +# 1996-03-11 fl Rewritten. +# 1997-01-03 fl Up and running. +# 1997-08-23 fl Added load hack +# 2001-04-16 fl Fixed randint shadow bug in random() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import array +from . import ImageColor, GimpPaletteFile, GimpGradientFile, PaletteFile + + +class ImagePalette(object): + """ + Color palette for palette mapped images + + :param mode: The mode to use for the Palette. See: + :ref:`concept-modes`. Defaults to "RGB" + :param palette: An optional palette. If given, it must be a bytearray, + an array or a list of ints between 0-255 and of length ``size`` + times the number of colors in ``mode``. The list must be aligned + by channel (All R values must be contiguous in the list before G + and B values.) Defaults to 0 through 255 per channel. + :param size: An optional palette size. If given, it cannot be equal to + or greater than 256. Defaults to 0. + """ + + def __init__(self, mode="RGB", palette=None, size=0): + self.mode = mode + self.rawmode = None # if set, palette contains raw data + self.palette = palette or bytearray(range(256))*len(self.mode) + self.colors = {} + self.dirty = None + if ((size == 0 and len(self.mode)*256 != len(self.palette)) or + (size != 0 and size != len(self.palette))): + raise ValueError("wrong palette size") + + def copy(self): + new = ImagePalette() + + new.mode = self.mode + new.rawmode = self.rawmode + if self.palette is not None: + new.palette = self.palette[:] + new.colors = self.colors.copy() + new.dirty = self.dirty + + return new + + def getdata(self): + """ + Get palette contents in format suitable # for the low-level + ``im.putpalette`` primitive. + + .. warning:: This method is experimental. + """ + if self.rawmode: + return self.rawmode, self.palette + return self.mode + ";L", self.tobytes() + + def tobytes(self): + """Convert palette to bytes. + + .. warning:: This method is experimental. 
+ """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(self.palette, bytes): + return self.palette + arr = array.array("B", self.palette) + if hasattr(arr, 'tobytes'): + return arr.tobytes() + return arr.tostring() + + # Declare tostring as an alias for tobytes + tostring = tobytes + + def getcolor(self, color): + """Given an rgb tuple, allocate palette entry. + + .. warning:: This method is experimental. + """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(color, tuple): + try: + return self.colors[color] + except KeyError: + # allocate new color slot + if isinstance(self.palette, bytes): + self.palette = bytearray(self.palette) + index = len(self.colors) + if index >= 256: + raise ValueError("cannot allocate more than 256 colors") + self.colors[color] = index + self.palette[index] = color[0] + self.palette[index+256] = color[1] + self.palette[index+512] = color[2] + self.dirty = 1 + return index + else: + raise ValueError("unknown color specifier: %r" % color) + + def save(self, fp): + """Save palette to text file. + + .. warning:: This method is experimental. + """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(fp, str): + fp = open(fp, "w") + fp.write("# Palette\n") + fp.write("# Mode: %s\n" % self.mode) + for i in range(256): + fp.write("%d" % i) + for j in range(i*len(self.mode), (i+1)*len(self.mode)): + try: + fp.write(" %d" % self.palette[j]) + except IndexError: + fp.write(" 0") + fp.write("\n") + fp.close() + + +# -------------------------------------------------------------------- +# Internal + +def raw(rawmode, data): + palette = ImagePalette() + palette.rawmode = rawmode + palette.palette = data + palette.dirty = 1 + return palette + + +# -------------------------------------------------------------------- +# Factories + +def make_linear_lut(black, white): + lut = [] + if black == 0: + for i in range(256): + lut.append(white*i//255) + else: + raise NotImplementedError # FIXME + return lut + + +def make_gamma_lut(exp): + lut = [] + for i in range(256): + lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5)) + return lut + + +def negative(mode="RGB"): + palette = list(range(256)) + palette.reverse() + return ImagePalette(mode, palette * len(mode)) + + +def random(mode="RGB"): + from random import randint + palette = [] + for i in range(256*len(mode)): + palette.append(randint(0, 255)) + return ImagePalette(mode, palette) + + +def sepia(white="#fff0c0"): + r, g, b = ImageColor.getrgb(white) + r = make_linear_lut(0, r) + g = make_linear_lut(0, g) + b = make_linear_lut(0, b) + return ImagePalette("RGB", r + g + b) + + +def wedge(mode="RGB"): + return ImagePalette(mode, list(range(256)) * len(mode)) + + +def load(filename): + + # FIXME: supports GIMP gradients only + + with open(filename, "rb") as fp: + + for paletteHandler in [ + GimpPaletteFile.GimpPaletteFile, + GimpGradientFile.GimpGradientFile, + PaletteFile.PaletteFile + ]: + try: + fp.seek(0) + lut = paletteHandler(fp).getpalette() + if lut: + break + except (SyntaxError, ValueError): + # import traceback + # traceback.print_exc() + pass + else: + raise IOError("cannot load palette") + + return lut # data, rawmode diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b0e50be9ddc78e624dcf4e06551f3e2491970a4 Binary files 
/dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePalette.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.py new file mode 100644 index 0000000000000000000000000000000000000000..8cbfec0d306f13d00f43005dc3727d4b1cf63361 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.py @@ -0,0 +1,20 @@ +# +# The Python Imaging Library +# $Id$ +# +# path interface +# +# History: +# 1996-11-04 fl Created +# 2002-04-14 fl Added documentation stub class +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +Path = Image.core.path diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f6c75d1559f6675a21721e9b65a9f962cd3f289 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImagePath.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.py new file mode 100644 index 0000000000000000000000000000000000000000..c9dc363128b6e7c90858de9f8fe2c3a5020fca15 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.py @@ -0,0 +1,212 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a simple Qt image interface. +# +# history: +# 2006-06-03 fl: created +# 2006-06-04 fl: inherit from QImage instead of wrapping it +# 2006-06-05 fl: removed toimage helper; move string support to ImageQt +# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) +# +# Copyright (c) 2006 by Secret Labs AB +# Copyright (c) 2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._util import isPath, py3 +from io import BytesIO +import sys + +qt_versions = [ + ['5', 'PyQt5'], + ['4', 'PyQt4'], + ['side', 'PySide'] +] +# If a version has already been imported, attempt it first +qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) +for qt_version, qt_module in qt_versions: + try: + if qt_module == 'PyQt5': + from PyQt5.QtGui import QImage, qRgba, QPixmap + from PyQt5.QtCore import QBuffer, QIODevice + elif qt_module == 'PyQt4': + from PyQt4.QtGui import QImage, qRgba, QPixmap + from PyQt4.QtCore import QBuffer, QIODevice + elif qt_module == 'PySide': + from PySide.QtGui import QImage, qRgba, QPixmap + from PySide.QtCore import QBuffer, QIODevice + except (ImportError, RuntimeError): + continue + qt_is_installed = True + break +else: + qt_is_installed = False + qt_version = None + + +def rgb(r, g, b, a=255): + """(Internal) Turns an RGB color into a Qt compatible color integer.""" + # use qRgb to pack the colors, and then turn the resulting long + # into a negative integer with the same bitpattern. 
+ return (qRgba(r, g, b, a) & 0xffffffff) + + +def fromqimage(im): + """ + :param im: A PIL Image object, or a file name + (given either as Python string or a PyQt string object) + """ + buffer = QBuffer() + buffer.open(QIODevice.ReadWrite) + # preserve alha channel with png + # otherwise ppm is more friendly with Image.open + if im.hasAlphaChannel(): + im.save(buffer, 'png') + else: + im.save(buffer, 'ppm') + + b = BytesIO() + try: + b.write(buffer.data()) + except TypeError: + # workaround for Python 2 + b.write(str(buffer.data())) + buffer.close() + b.seek(0) + + return Image.open(b) + + +def fromqpixmap(im): + return fromqimage(im) + # buffer = QBuffer() + # buffer.open(QIODevice.ReadWrite) + # # im.save(buffer) + # # What if png doesn't support some image features like animation? + # im.save(buffer, 'ppm') + # bytes_io = BytesIO() + # bytes_io.write(buffer.data()) + # buffer.close() + # bytes_io.seek(0) + # return Image.open(bytes_io) + + +def align8to32(bytes, width, mode): + """ + converts each scanline of data from 8 bit to 32 bit aligned + """ + + bits_per_pixel = { + '1': 1, + 'L': 8, + 'P': 8, + }[mode] + + # calculate bytes per line and the extra padding if needed + bits_per_line = bits_per_pixel * width + full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) + bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) + + extra_padding = -bytes_per_line % 4 + + # already 32 bit aligned by luck + if not extra_padding: + return bytes + + new_data = [] + for i in range(len(bytes) // bytes_per_line): + new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] + b'\x00' * extra_padding) + + return b''.join(new_data) + + +def _toqclass_helper(im): + data = None + colortable = None + + # handle filename, if given instead of image name + if hasattr(im, "toUtf8"): + # FIXME - is this really the best way to do this? + if py3: + im = str(im.toUtf8(), "utf-8") + else: + im = unicode(im.toUtf8(), "utf-8") + if isPath(im): + im = Image.open(im) + + if im.mode == "1": + format = QImage.Format_Mono + elif im.mode == "L": + format = QImage.Format_Indexed8 + colortable = [] + for i in range(256): + colortable.append(rgb(i, i, i)) + elif im.mode == "P": + format = QImage.Format_Indexed8 + colortable = [] + palette = im.getpalette() + for i in range(0, len(palette), 3): + colortable.append(rgb(*palette[i:i+3])) + elif im.mode == "RGB": + data = im.tobytes("raw", "BGRX") + format = QImage.Format_RGB32 + elif im.mode == "RGBA": + try: + data = im.tobytes("raw", "BGRA") + except SystemError: + # workaround for earlier versions + r, g, b, a = im.split() + im = Image.merge("RGBA", (b, g, r, a)) + format = QImage.Format_ARGB32 + else: + raise ValueError("unsupported image mode %r" % im.mode) + + __data = data or align8to32(im.tobytes(), im.size[0], im.mode) + return { + 'data': __data, 'im': im, 'format': format, 'colortable': colortable + } + + +if qt_is_installed: + class ImageQt(QImage): + + def __init__(self, im): + """ + An PIL image wrapper for Qt. This is a subclass of PyQt's QImage + class. + + :param im: A PIL Image object, or a file name (given either as Python + string or a PyQt string object). + """ + im_data = _toqclass_helper(im) + # must keep a reference, or Qt will crash! + # All QImage constructors that take data operate on an existing + # buffer, so this buffer has to hang on for the life of the image. 
+ # Fixes https://github.com/python-pillow/Pillow/issues/1370 + self.__data = im_data['data'] + QImage.__init__(self, + self.__data, im_data['im'].size[0], + im_data['im'].size[1], im_data['format']) + if im_data['colortable']: + self.setColorTable(im_data['colortable']) + + +def toqimage(im): + return ImageQt(im) + + +def toqpixmap(im): + # # This doesn't work. For now using a dumb approach. + # im_data = _toqclass_helper(im) + # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1]) + # result.loadFromData(im_data['data']) + # Fix some strange bug that causes + if im.mode == 'RGB': + im = im.convert('RGBA') + + qimage = toqimage(im) + return QPixmap.fromImage(qimage) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24d3b7d446e5a574d5a5a99efe737bfb278ca9cb Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageQt.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc6e5de16539ea40cfbaaa3bc3757017f263454 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.py @@ -0,0 +1,56 @@ +# +# The Python Imaging Library. +# $Id$ +# +# sequence support classes +# +# history: +# 1997-02-20 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## + + +class Iterator(object): + """ + This class implements an iterator object that can be used to loop + over an image sequence. + + You can use the ``[]`` operator to access elements by index. This operator + will raise an :py:exc:`IndexError` if you try to access a nonexistent + frame. + + :param im: An image object. + """ + + def __init__(self, im): + if not hasattr(im, "seek"): + raise AttributeError("im must have seek method") + self.im = im + self.position = 0 + + def __getitem__(self, ix): + try: + self.im.seek(ix) + return self.im + except EOFError: + raise IndexError # end of sequence + + def __iter__(self): + return self + + def __next__(self): + try: + self.im.seek(self.position) + self.position += 1 + return self.im + except EOFError: + raise StopIteration + + def next(self): + return self.__next__() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01496b190c409c7ed213bd8c7fc6ade33d75c48c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageSequence.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.py new file mode 100644 index 0000000000000000000000000000000000000000..b50d613588432965e852b23caa919d18c13574ec --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.py @@ -0,0 +1,194 @@ +# +# The Python Imaging Library. +# $Id$ +# +# im.show() drivers +# +# History: +# 2008-04-06 fl Created +# +# Copyright (c) Secret Labs AB 2008. +# +# See the README file for information on usage and redistribution. 
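An illustrative sketch of the ImageSequence.Iterator shown above, assuming a multi-frame file such as an animated GIF; the filename is a placeholder.

from PIL import Image, ImageSequence

im = Image.open("animation.gif")
for frame in ImageSequence.Iterator(im):
    # each step seeks the underlying image to the next frame until EOFError
    print(frame.size)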
+# + +from __future__ import print_function + +from PIL import Image +import os +import sys + +if sys.version_info.major >= 3: + from shlex import quote +else: + from pipes import quote + +_viewers = [] + + +def register(viewer, order=1): + try: + if issubclass(viewer, Viewer): + viewer = viewer() + except TypeError: + pass # raised if viewer wasn't a class + if order > 0: + _viewers.append(viewer) + elif order < 0: + _viewers.insert(0, viewer) + + +def show(image, title=None, **options): + r""" + Display a given image. + + :param image: An image object. + :param title: Optional title. Not all viewers can display the title. + :param \**options: Additional viewer options. + :returns: True if a suitable viewer was found, false otherwise. + """ + for viewer in _viewers: + if viewer.show(image, title=title, **options): + return 1 + return 0 + + +class Viewer(object): + """Base class for viewers.""" + + # main api + + def show(self, image, **options): + + # save temporary image to disk + if image.mode[:4] == "I;16": + # @PIL88 @PIL101 + # "I;16" isn't an 'official' mode, but we still want to + # provide a simple way to show 16-bit images. + base = "L" + # FIXME: auto-contrast if max() > 255? + else: + base = Image.getmodebase(image.mode) + if base != image.mode and image.mode != "1" and image.mode != "RGBA": + image = image.convert(base) + + return self.show_image(image, **options) + + # hook methods + + format = None + options = {} + + def get_format(self, image): + """Return format name, or None to save as PGM/PPM""" + return self.format + + def get_command(self, file, **options): + raise NotImplementedError + + def save_image(self, image): + """Save to temporary file, and return filename""" + return image._dump(format=self.get_format(image), **self.options) + + def show_image(self, image, **options): + """Display given image""" + return self.show_file(self.save_image(image), **options) + + def show_file(self, file, **options): + """Display given file""" + os.system(self.get_command(file, **options)) + return 1 + +# -------------------------------------------------------------------- + + +if sys.platform == "win32": + + class WindowsViewer(Viewer): + format = "BMP" + + def get_command(self, file, **options): + return ('start "Pillow" /WAIT "%s" ' + '&& ping -n 2 127.0.0.1 >NUL ' + '&& del /f "%s"' % (file, file)) + + register(WindowsViewer) + +elif sys.platform == "darwin": + + class MacViewer(Viewer): + format = "PNG" + options = {'compress_level': 1} + + def get_command(self, file, **options): + # on darwin open returns immediately resulting in the temp + # file removal while app is opening + command = "open -a /Applications/Preview.app" + command = "(%s %s; sleep 20; rm -f %s)&" % (command, quote(file), + quote(file)) + return command + + register(MacViewer) + +else: + + # unixoids + + def which(executable): + path = os.environ.get("PATH") + if not path: + return None + for dirname in path.split(os.pathsep): + filename = os.path.join(dirname, executable) + if os.path.isfile(filename) and os.access(filename, os.X_OK): + return filename + return None + + class UnixViewer(Viewer): + format = "PNG" + options = {'compress_level': 1} + + def show_file(self, file, **options): + command, executable = self.get_command_ex(file, **options) + command = "(%s %s; rm -f %s)&" % (command, quote(file), + quote(file)) + os.system(command) + return 1 + + # implementations + + class DisplayViewer(UnixViewer): + def get_command_ex(self, file, **options): + command = executable = "display" + return command, 
executable + + if which("display"): + register(DisplayViewer) + + class EogViewer(UnixViewer): + def get_command_ex(self, file, **options): + command = executable = "eog" + return command, executable + + if which("eog"): + register(EogViewer) + + class XVViewer(UnixViewer): + def get_command_ex(self, file, title=None, **options): + # note: xv is pretty outdated. most modern systems have + # imagemagick's display command instead. + command = executable = "xv" + if title: + command += " -name %s" % quote(title) + return command, executable + + if which("xv"): + register(XVViewer) + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python ImageShow.py imagefile [title]") + sys.exit() + + print(show(Image.open(sys.argv[1]), *sys.argv[2:])) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e41340d3402fc9af1b89e26cc646d4464553e70 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageShow.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.py new file mode 100644 index 0000000000000000000000000000000000000000..cd58fc8ff4bef7177e26968b176eb1af0404e876 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.py @@ -0,0 +1,148 @@ +# +# The Python Imaging Library. +# $Id$ +# +# global image statistics +# +# History: +# 1996-04-05 fl Created +# 1997-05-21 fl Added mask; added rms, var, stddev attributes +# 1997-08-05 fl Added median +# 1998-07-05 hk Fixed integer overflow error +# +# Notes: +# This class shows how to implement delayed evaluation of attributes. +# To get a certain value, simply access the corresponding attribute. +# The __getattr__ dispatcher takes care of the rest. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996-97. +# +# See the README file for information on usage and redistribution. 
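A short sketch of the module-level ImageShow.show helper defined above; which registered viewer actually handles the call depends on the platform checks shown earlier, and the filename is a placeholder.

from PIL import Image, ImageShow

im = Image.open("photo.jpg")
# returns 1 if one of the registered viewers displayed the image, 0 otherwise
ok = ImageShow.show(im, title="preview")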
+# + +import math +import operator +import functools + + +class Stat(object): + + def __init__(self, image_or_list, mask=None): + try: + if mask: + self.h = image_or_list.histogram(mask) + else: + self.h = image_or_list.histogram() + except AttributeError: + self.h = image_or_list # assume it to be a histogram list + if not isinstance(self.h, list): + raise TypeError("first argument must be image or list") + self.bands = list(range(len(self.h) // 256)) + + def __getattr__(self, id): + "Calculate missing attribute" + if id[:4] == "_get": + raise AttributeError(id) + # calculate missing attribute + v = getattr(self, "_get" + id)() + setattr(self, id, v) + return v + + def _getextrema(self): + "Get min/max values for each band in the image" + + def minmax(histogram): + n = 255 + x = 0 + for i in range(256): + if histogram[i]: + n = min(n, i) + x = max(x, i) + return n, x # returns (255, 0) if there's no data in the histogram + + v = [] + for i in range(0, len(self.h), 256): + v.append(minmax(self.h[i:])) + return v + + def _getcount(self): + "Get total number of pixels in each layer" + + v = [] + for i in range(0, len(self.h), 256): + v.append(functools.reduce(operator.add, self.h[i:i+256])) + return v + + def _getsum(self): + "Get sum of all pixels in each layer" + + v = [] + for i in range(0, len(self.h), 256): + layerSum = 0.0 + for j in range(256): + layerSum += j * self.h[i + j] + v.append(layerSum) + return v + + def _getsum2(self): + "Get squared sum of all pixels in each layer" + + v = [] + for i in range(0, len(self.h), 256): + sum2 = 0.0 + for j in range(256): + sum2 += (j ** 2) * float(self.h[i + j]) + v.append(sum2) + return v + + def _getmean(self): + "Get average pixel level for each layer" + + v = [] + for i in self.bands: + v.append(self.sum[i] / self.count[i]) + return v + + def _getmedian(self): + "Get median pixel level for each layer" + + v = [] + for i in self.bands: + s = 0 + l = self.count[i]//2 + b = i * 256 + for j in range(256): + s = s + self.h[b+j] + if s > l: + break + v.append(j) + return v + + def _getrms(self): + "Get RMS for each layer" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.sum2[i] / self.count[i])) + return v + + def _getvar(self): + "Get variance for each layer" + + v = [] + for i in self.bands: + n = self.count[i] + v.append((self.sum2[i]-(self.sum[i]**2.0)/n)/n) + return v + + def _getstddev(self): + "Get standard deviation for each layer" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.var[i])) + return v + + +Global = Stat # compatibility diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c0e9a50e4c618b54bbe4b1da1dff7f0ec366bc9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageStat.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ad53df7a438ee2a0f3cd39495421f075bb2e92 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.py @@ -0,0 +1,303 @@ +# +# The Python Imaging Library. 
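A brief sketch of the lazily computed Stat attributes defined above (each value is computed per band on first access via __getattr__); the filename is a placeholder.

from PIL import Image, ImageStat

im = Image.open("photo.jpg").convert("RGB")
stat = ImageStat.Stat(im)
print(stat.extrema)   # per-band (min, max)
print(stat.mean)      # per-band average pixel level
print(stat.stddev)    # per-band standard deviation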
+# $Id$ +# +# a Tk display interface +# +# History: +# 96-04-08 fl Created +# 96-09-06 fl Added getimage method +# 96-11-01 fl Rewritten, removed image attribute and crop method +# 97-05-09 fl Use PyImagingPaste method instead of image type +# 97-05-12 fl Minor tweaks to match the IFUNC95 interface +# 97-05-17 fl Support the "pilbitmap" booster patch +# 97-06-05 fl Added file= and data= argument to image constructors +# 98-03-09 fl Added width and height methods to Image classes +# 98-07-02 fl Use default mode for "P" images without palette attribute +# 98-07-02 fl Explicitly destroy Tkinter image objects +# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch) +# 99-07-26 fl Automatically hook into Tkinter (if possible) +# 99-08-15 fl Hook uses _imagingtk instead of _imaging +# +# Copyright (c) 1997-1999 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import sys + +if sys.version_info.major > 2: + import tkinter +else: + import Tkinter as tkinter + +# required for pypy, which always has cffi installed +try: + from cffi import FFI + ffi = FFI() +except ImportError: + pass + +from . import Image +from io import BytesIO + + +# -------------------------------------------------------------------- +# Check for Tkinter interface hooks + +_pilbitmap_ok = None + + +def _pilbitmap_check(): + global _pilbitmap_ok + if _pilbitmap_ok is None: + try: + im = Image.new("1", (1, 1)) + tkinter.BitmapImage(data="PIL:%d" % im.im.id) + _pilbitmap_ok = 1 + except tkinter.TclError: + _pilbitmap_ok = 0 + return _pilbitmap_ok + + +def _get_image_from_kw(kw): + source = None + if "file" in kw: + source = kw.pop("file") + elif "data" in kw: + source = BytesIO(kw.pop("data")) + if source: + return Image.open(source) + + +# -------------------------------------------------------------------- +# PhotoImage + +class PhotoImage(object): + """ + A Tkinter-compatible photo image. This can be used + everywhere Tkinter expects an image object. If the image is an RGBA + image, pixels having alpha 0 are treated as transparent. + + The constructor takes either a PIL image, or a mode and a size. + Alternatively, you can use the **file** or **data** options to initialize + the photo image object. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. + :param size: If the first argument is a mode string, this defines the size + of the image. + :keyword file: A filename to load the image from (using + ``Image.open(file)``). + :keyword data: An 8-bit string containing image data (as loaded from an + image file). 
+ """ + + def __init__(self, image=None, size=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + if hasattr(image, "mode") and hasattr(image, "size"): + # got an image instead of a mode + mode = image.mode + if mode == "P": + # palette mapped data + image.load() + try: + mode = image.palette.mode + except AttributeError: + mode = "RGB" # default + size = image.size + kw["width"], kw["height"] = size + else: + mode = image + image = None + + if mode not in ["1", "L", "RGB", "RGBA"]: + mode = Image.getmodebase(mode) + + self.__mode = mode + self.__size = size + self.__photo = tkinter.PhotoImage(**kw) + self.tk = self.__photo.tk + if image: + self.paste(image) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except: + pass # ignore internal errors + + def __str__(self): + """ + Get the Tkinter photo image identifier. This method is automatically + called by Tkinter whenever a PhotoImage object is passed to a Tkinter + method. + + :return: A Tkinter photo image identifier (a string). + """ + return str(self.__photo) + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def paste(self, im, box=None): + """ + Paste a PIL image into the photo image. Note that this can + be very slow if the photo image is displayed. + + :param im: A PIL image. The size must match the target region. If the + mode does not match, the image is converted to the mode of + the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. If None is given + instead of a tuple, all of the image is assumed. + """ + + # convert to blittable + im.load() + image = im.im + if image.isblock() and im.mode == self.__mode: + block = image + else: + block = image.new_block(self.__mode, im.size) + image.convert2(block, image) # convert directly between buffers + + tk = self.__photo.tk + + try: + tk.call("PyImagingPhoto", self.__photo, block.id) + except tkinter.TclError: + # activate Tkinter hook + try: + from . import _imagingtk + try: + if hasattr(tk, 'interp'): + # Pypy is using a ffi cdata element + # (Pdb) self.tk.interp + # + _imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1) + else: + _imagingtk.tkinit(tk.interpaddr(), 1) + except AttributeError: + _imagingtk.tkinit(id(tk), 0) + tk.call("PyImagingPhoto", self.__photo, block.id) + except (ImportError, AttributeError, tkinter.TclError): + raise # configuration problem; cannot attach to Tkinter + +# -------------------------------------------------------------------- +# BitmapImage + + +class BitmapImage(object): + """ + A Tkinter-compatible bitmap image. This can be used everywhere Tkinter + expects an image object. + + The given image must have mode "1". Pixels having value 0 are treated as + transparent. Options, if any, are passed on to Tkinter. The most commonly + used option is **foreground**, which is used to specify the color for the + non-transparent parts. See the Tkinter documentation for information on + how to specify colours. + + :param image: A PIL image. 
+ """ + + def __init__(self, image=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + self.__mode = image.mode + self.__size = image.size + + if _pilbitmap_check(): + # fast way (requires the pilbitmap booster patch) + image.load() + kw["data"] = "PIL:%d" % image.im.id + self.__im = image # must keep a reference + else: + # slow but safe way + kw["data"] = image.tobitmap() + self.__photo = tkinter.BitmapImage(**kw) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except: + pass # ignore internal errors + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def __str__(self): + """ + Get the Tkinter bitmap image identifier. This method is automatically + called by Tkinter whenever a BitmapImage object is passed to a Tkinter + method. + + :return: A Tkinter bitmap image identifier (a string). + """ + return str(self.__photo) + + +def getimage(photo): + """ This function is unimplemented """ + + """Copies the contents of a PhotoImage to a PIL image memory.""" + photo.tk.call("PyImagingPhotoGet", photo) + + +def _show(image, title): + """Helper for the Image.show method.""" + + class UI(tkinter.Label): + def __init__(self, master, im): + if im.mode == "1": + self.image = BitmapImage(im, foreground="white", master=master) + else: + self.image = PhotoImage(im, master=master) + tkinter.Label.__init__(self, master, image=self.image, + bg="black", bd=0) + + if not tkinter._default_root: + raise IOError("tkinter not initialized") + top = tkinter.Toplevel() + if title: + top.title(title) + UI(top, image).pack() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f63dd69eb1869bf965159ae8be4d8637815cac9e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTk.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.py new file mode 100644 index 0000000000000000000000000000000000000000..c3f6af8b5e4e25ea308c4df943043585dd15b126 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.py @@ -0,0 +1,98 @@ +# +# The Python Imaging Library. +# $Id$ +# +# transform wrappers +# +# History: +# 2002-04-08 fl Created +# +# Copyright (c) 2002 by Secret Labs AB +# Copyright (c) 2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class Transform(Image.ImageTransformHandler): + def __init__(self, data): + self.data = data + + def getdata(self): + return self.method, self.data + + def transform(self, size, image, **options): + # can be overridden + method, data = self.getdata() + return image.transform(size, method, data, **options) + + +class AffineTransform(Transform): + """ + Define an affine image transform. + + This function takes a 6-tuple (a, b, c, d, e, f) which contain the first + two rows from an affine transform matrix. 
For each pixel (x, y) in the + output image, the new value is taken from a position (a x + b y + c, + d x + e y + f) in the input image, rounded to nearest pixel. + + This function can be used to scale, translate, rotate, and shear the + original image. + + See :py:meth:`~PIL.Image.Image.transform` + + :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows + from an affine transform matrix. + """ + method = Image.AFFINE + + +class ExtentTransform(Transform): + """ + Define a transform to extract a subregion from an image. + + Maps a rectangle (defined by two corners) from the image to a rectangle of + the given size. The resulting image will contain data sampled from between + the corners, such that (x0, y0) in the input image will end up at (0,0) in + the output image, and (x1, y1) at size. + + This method can be used to crop, stretch, shrink, or mirror an arbitrary + rectangle in the current image. It is slightly slower than crop, but about + as fast as a corresponding resize operation. + + See :py:meth:`~PIL.Image.Image.transform` + + :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the + input image's coordinate system. See :ref:`coordinate-system`. + """ + method = Image.EXTENT + + +class QuadTransform(Transform): + """ + Define a quad image transform. + + Maps a quadrilateral (a region defined by four corners) from the image to a + rectangle of the given size. + + See :py:meth:`~PIL.Image.Image.transform` + + :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the + upper left, lower left, lower right, and upper right corner of the + source quadrilateral. + """ + method = Image.QUAD + + +class MeshTransform(Transform): + """ + Define a mesh image transform. A mesh transform consists of one or more + individual quad transforms. + + See :py:meth:`~PIL.Image.Image.transform` + + :param data: A list of (bbox, quad) tuples. + """ + method = Image.MESH diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3487265246d59e695fd2528a37bf35b5fd16f2d6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageTransform.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.py new file mode 100644 index 0000000000000000000000000000000000000000..9b86270bc8efcb8e29372c45d6db28b3a9ae8659 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.py @@ -0,0 +1,228 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Windows DIB display interface +# +# History: +# 1996-05-20 fl Created +# 1996-09-20 fl Fixed subregion exposure +# 1997-09-21 fl Added draw primitive (for tzPrint) +# 2003-05-21 fl Added experimental Window/ImageWindow classes +# 2003-09-05 fl Added fromstring/tostring methods +# +# Copyright (c) Secret Labs AB 1997-2003. +# Copyright (c) Fredrik Lundh 1996-2003. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class HDC(object): + """ + Wraps an HDC integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods. + """ + def __init__(self, dc): + self.dc = dc + + def __int__(self): + return self.dc + + +class HWND(object): + """ + Wraps an HWND integer. 
The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods, instead of a DC. + """ + def __init__(self, wnd): + self.wnd = wnd + + def __int__(self): + return self.wnd + + +class Dib(object): + """ + A Windows bitmap with the given mode and size. The mode can be one of "1", + "L", "P", or "RGB". + + If the display requires a palette, this constructor creates a suitable + palette and associates it with the image. For an "L" image, 128 greylevels + are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together + with 20 greylevels. + + To make sure that palettes work properly under Windows, you must call the + **palette** method upon certain events from Windows. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. The mode can be one of "1", + "L", "P", or "RGB". + :param size: If the first argument is a mode string, this + defines the size of the image. + """ + + def __init__(self, image, size=None): + if hasattr(image, "mode") and hasattr(image, "size"): + mode = image.mode + size = image.size + else: + mode = image + image = None + if mode not in ["1", "L", "P", "RGB"]: + mode = Image.getmodebase(mode) + self.image = Image.core.display(mode, size) + self.mode = mode + self.size = size + if image: + self.paste(image) + + def expose(self, handle): + """ + Copy the bitmap contents to a device context. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. In PythonWin, you can use the + :py:meth:`CDC.GetHandleAttrib` to get a suitable handle. + """ + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.expose(dc) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.expose(handle) + return result + + def draw(self, handle, dst, src=None): + """ + Same as expose, but allows you to specify where to draw the image, and + what part of it to draw. + + The destination and source areas are given as 4-tuple rectangles. If + the source is omitted, the entire image is copied. If the source and + the destination have different sizes, the image is resized as + necessary. + """ + if not src: + src = (0, 0) + self.size + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.draw(dc, dst, src) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.draw(handle, dst, src) + return result + + def query_palette(self, handle): + """ + Installs the palette associated with the image in the given device + context. + + This method should be called upon **QUERYNEWPALETTE** and + **PALETTECHANGED** events from Windows. If this method returns a + non-zero value, one or more display palette entries were changed, and + the image should be redrawn. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. + :return: A true value if one or more entries were changed (this + indicates that the image should be redrawn). + """ + if isinstance(handle, HWND): + handle = self.image.getdc(handle) + try: + result = self.image.query_palette(handle) + finally: + self.image.releasedc(handle, handle) + else: + result = self.image.query_palette(handle) + return result + + def paste(self, im, box=None): + """ + Paste a PIL image into the bitmap image. + + :param im: A PIL image. The size must match the target region. + If the mode does not match, the image is converted to the + mode of the bitmap image. 
+ :param box: A 4-tuple defining the left, upper, right, and + lower pixel coordinate. See :ref:`coordinate-system`. If + None is given instead of a tuple, all of the image is + assumed. + """ + im.load() + if self.mode != im.mode: + im = im.convert(self.mode) + if box: + self.image.paste(im.im, box) + else: + self.image.paste(im.im) + + def frombytes(self, buffer): + """ + Load display memory contents from byte data. + + :param buffer: A buffer containing display data (usually + data returned from tobytes) + """ + return self.image.frombytes(buffer) + + def tobytes(self): + """ + Copy display memory contents to bytes object. + + :return: A bytes object containing display data. + """ + return self.image.tobytes() + + +class Window(object): + """Create a Window with the given title size.""" + + def __init__(self, title="PIL", width=None, height=None): + self.hwnd = Image.core.createwindow( + title, self.__dispatcher, width or 0, height or 0 + ) + + def __dispatcher(self, action, *args): + return getattr(self, "ui_handle_" + action)(*args) + + def ui_handle_clear(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_damage(self, x0, y0, x1, y1): + pass + + def ui_handle_destroy(self): + pass + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_resize(self, width, height): + pass + + def mainloop(self): + Image.core.eventloop() + + +class ImageWindow(Window): + """Create an image window which displays the given image.""" + + def __init__(self, image, title="PIL"): + if not isinstance(image, Dib): + image = Dib(image) + self.image = image + width, height = image.size + Window.__init__(self, title, width=width, height=height) + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + self.image.draw(dc, (x0, y0, x1, y1)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17b9fca5ed8db663b600061c14bfa1f351519428 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImageWin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..05e8cd31a5c80655292e78e5c1efec481a06d909 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.py @@ -0,0 +1,95 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IM Tools support for PIL +# +# history: +# 1996-05-27 fl Created (read 8-bit images only) +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re + +from . import Image, ImageFile + +__version__ = "0.2" + + +# +# -------------------------------------------------------------------- + +field = re.compile(br"([a-z]*) ([^ \r\n]*)") + + +## +# Image plugin for IM Tools images. + +class ImtImageFile(ImageFile.ImageFile): + + format = "IMT" + format_description = "IM Tools" + + def _open(self): + + # Quick rejection: if there's not a LF among the first + # 100 bytes, this is (probably) not a text header. 
+ + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + xsize = ysize = 0 + + while True: + + s = self.fp.read(1) + if not s: + break + + if s == b'\x0C': + + # image data begins + self.tile = [("raw", (0, 0)+self.size, + self.fp.tell(), + (self.mode, 0, 1))] + + break + + else: + + # read key/value pair + # FIXME: dangerous, may read whole file + s = s + self.fp.readline() + if len(s) == 1 or len(s) > 100: + break + if s[0] == ord(b"*"): + continue # comment + + m = field.match(s) + if not m: + break + k, v = m.group(1, 2) + if k == "width": + xsize = int(v) + self.size = xsize, ysize + elif k == "height": + ysize = int(v) + self.size = xsize, ysize + elif k == "pixel" and v == "n8": + self.mode = "L" + + +# +# -------------------------------------------------------------------- + +Image.register_open(ImtImageFile.format, ImtImageFile) + +# +# no extension registered (".im" is simply too common) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cacc164dfb1f7ea40095137f67b0f83250bba56 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/ImtImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a8de17e3fb5b23810baea26e964d1e253ed1b6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.py @@ -0,0 +1,257 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IPTC/NAA file handling +# +# history: +# 1995-10-01 fl Created +# 1998-03-09 fl Cleaned up and added to PIL +# 2002-06-18 fl Added getiptcinfo helper +# +# Copyright (c) Secret Labs AB 1997-2002. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +from . import Image, ImageFile +from ._binary import i8, i16be as i16, i32be as i32, o8 +import os +import tempfile + +__version__ = "0.3" + +COMPRESSION = { + 1: "raw", + 5: "jpeg" +} + +PAD = o8(0) * 4 + + +# +# Helpers + +def i(c): + return i32((PAD + c)[-4:]) + + +def dump(c): + for i in c: + print("%02x" % i8(i), end=' ') + print() + + +## +# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields +# from TIFF and JPEG files, use the getiptcinfo function. 
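A minimal sketch of the getiptcinfo helper that the comment above points to (its definition follows below), assuming a JPEG carrying an embedded IPTC/NAA block; the filename and the tags actually present are illustrative.

from PIL import Image
from PIL.IptcImagePlugin import getiptcinfo

im = Image.open("scan.jpg")
info = getiptcinfo(im)        # dict keyed by (record, dataset) tuples, or None
if info is None:
    print("no IPTC block found")
else:
    for tag, value in info.items():
        print(tag, value)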
+ +class IptcImageFile(ImageFile.ImageFile): + + format = "IPTC" + format_description = "IPTC/NAA" + + def getint(self, key): + return i(self.info[key]) + + def field(self): + # + # get a IPTC field header + s = self.fp.read(5) + if not len(s): + return None, 0 + + tag = i8(s[1]), i8(s[2]) + + # syntax + if i8(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9: + raise SyntaxError("invalid IPTC/NAA file") + + # field size + size = i8(s[3]) + if size > 132: + raise IOError("illegal field length in IPTC/NAA file") + elif size == 128: + size = 0 + elif size > 128: + size = i(self.fp.read(size-128)) + else: + size = i16(s[3:]) + + return tag, size + + def _open(self): + + # load descriptive fields + while True: + offset = self.fp.tell() + tag, size = self.field() + if not tag or tag == (8, 10): + break + if size: + tagdata = self.fp.read(size) + else: + tagdata = None + if tag in self.info: + if isinstance(self.info[tag], list): + self.info[tag].append(tagdata) + else: + self.info[tag] = [self.info[tag], tagdata] + else: + self.info[tag] = tagdata + + # print(tag, self.info[tag]) + + # mode + layers = i8(self.info[(3, 60)][0]) + component = i8(self.info[(3, 60)][1]) + if (3, 65) in self.info: + id = i8(self.info[(3, 65)][0])-1 + else: + id = 0 + if layers == 1 and not component: + self.mode = "L" + elif layers == 3 and component: + self.mode = "RGB"[id] + elif layers == 4 and component: + self.mode = "CMYK"[id] + + # size + self.size = self.getint((3, 20)), self.getint((3, 30)) + + # compression + try: + compression = COMPRESSION[self.getint((3, 120))] + except KeyError: + raise IOError("Unknown IPTC image compression") + + # tile + if tag == (8, 10): + self.tile = [("iptc", (compression, offset), + (0, 0, self.size[0], self.size[1]))] + + def load(self): + + if len(self.tile) != 1 or self.tile[0][0] != "iptc": + return ImageFile.ImageFile.load(self) + + type, tile, box = self.tile[0] + + encoding, offset = tile + + self.fp.seek(offset) + + # Copy image data to temporary file + o_fd, outfile = tempfile.mkstemp(text=False) + o = os.fdopen(o_fd) + if encoding == "raw": + # To simplify access to the extracted file, + # prepend a PPM header + o.write("P5\n%d %d\n255\n" % self.size) + while True: + type, size = self.field() + if type != (8, 10): + break + while size > 0: + s = self.fp.read(min(size, 8192)) + if not s: + break + o.write(s) + size -= len(s) + o.close() + + try: + _im = Image.open(outfile) + _im.load() + self.im = _im.im + finally: + try: + os.unlink(outfile) + except OSError: + pass + + +Image.register_open(IptcImageFile.format, IptcImageFile) + +Image.register_extension(IptcImageFile.format, ".iim") + + +def getiptcinfo(im): + """ + Get IPTC information from TIFF, JPEG, or IPTC file. + + :param im: An image containing IPTC data. + :returns: A dictionary containing IPTC information, or None if + no IPTC information block was found. + """ + from . 
import TiffImagePlugin, JpegImagePlugin + import io + + data = None + + if isinstance(im, IptcImageFile): + # return info dictionary right away + return im.info + + elif isinstance(im, JpegImagePlugin.JpegImageFile): + # extract the IPTC/NAA resource + try: + app = im.app["APP13"] + if app[:14] == b"Photoshop 3.0\x00": + app = app[14:] + # parse the image resource block + offset = 0 + while app[offset:offset+4] == b"8BIM": + offset += 4 + # resource code + code = i16(app, offset) + offset += 2 + # resource name (usually empty) + name_len = i8(app[offset]) + # name = app[offset+1:offset+1+name_len] + offset = 1 + offset + name_len + if offset & 1: + offset += 1 + # resource data block + size = i32(app, offset) + offset += 4 + if code == 0x0404: + # 0x0404 contains IPTC/NAA data + data = app[offset:offset+size] + break + offset = offset + size + if offset & 1: + offset += 1 + except (AttributeError, KeyError): + pass + + elif isinstance(im, TiffImagePlugin.TiffImageFile): + # get raw data from the IPTC/NAA tag (PhotoShop tags the data + # as 4-byte integers, so we cannot use the get method...) + try: + data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] + except (AttributeError, KeyError): + pass + + if data is None: + return None # no properties + + # create an IptcImagePlugin object without initializing it + class FakeImage(object): + pass + im = FakeImage() + im.__class__ = IptcImageFile + + # parse the IPTC information chunk + im.info = {} + im.fp = io.BytesIO(data) + + try: + im._open() + except (IndexError, KeyError): + pass # expected failure + + return im.info diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15cca12d5f752525adbd2fa2b0d0e0e414c81649 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/IptcImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..25fbefbca5855791d15e5dc251bc4fae1b8ae62d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.py @@ -0,0 +1,276 @@ +# +# The Python Imaging Library +# $Id$ +# +# JPEG2000 file handling +# +# History: +# 2014-03-12 ajh Created +# +# Copyright (c) 2014 Coriolis Systems Limited +# Copyright (c) 2014 Alastair Houghton +# +# See the README file for information on usage and redistribution. +# +from . 
import Image, ImageFile +import struct +import os +import io + +__version__ = "0.1" + + +def _parse_codestream(fp): + """Parse the JPEG 2000 codestream to extract the size and component + count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" + + hdr = fp.read(2) + lsiz = struct.unpack('>H', hdr)[0] + siz = hdr + fp.read(lsiz - 2) + lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \ + xtosiz, ytosiz, csiz \ + = struct.unpack_from('>HHIIIIIIIIH', siz) + ssiz = [None]*csiz + xrsiz = [None]*csiz + yrsiz = [None]*csiz + for i in range(csiz): + ssiz[i], xrsiz[i], yrsiz[i] \ + = struct.unpack_from('>BBB', siz, 36 + 3 * i) + + size = (xsiz - xosiz, ysiz - yosiz) + if csiz == 1: + if (yrsiz[0] & 0x7f) > 8: + mode = 'I;16' + else: + mode = 'L' + elif csiz == 2: + mode = 'LA' + elif csiz == 3: + mode = 'RGB' + elif csiz == 4: + mode = 'RGBA' + else: + mode = None + + return (size, mode) + + +def _parse_jp2_header(fp): + """Parse the JP2 header box to extract size, component count and + color space information, returning a PIL (size, mode) tuple.""" + + # Find the JP2 header box + header = None + while True: + lbox, tbox = struct.unpack('>I4s', fp.read(8)) + if lbox == 1: + lbox = struct.unpack('>Q', fp.read(8))[0] + hlen = 16 + else: + hlen = 8 + + if lbox < hlen: + raise SyntaxError('Invalid JP2 header length') + + if tbox == b'jp2h': + header = fp.read(lbox - hlen) + break + else: + fp.seek(lbox - hlen, os.SEEK_CUR) + + if header is None: + raise SyntaxError('could not find JP2 header') + + size = None + mode = None + bpc = None + nc = None + + hio = io.BytesIO(header) + while True: + lbox, tbox = struct.unpack('>I4s', hio.read(8)) + if lbox == 1: + lbox = struct.unpack('>Q', hio.read(8))[0] + hlen = 16 + else: + hlen = 8 + + content = hio.read(lbox - hlen) + + if tbox == b'ihdr': + height, width, nc, bpc, c, unkc, ipr \ + = struct.unpack('>IIHBBBB', content) + size = (width, height) + if unkc: + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: + mode = 'L' + elif nc == 2: + mode = 'LA' + elif nc == 3: + mode = 'RGB' + elif nc == 4: + mode = 'RGBA' + break + elif tbox == b'colr': + meth, prec, approx = struct.unpack_from('>BBB', content) + if meth == 1: + cs = struct.unpack_from('>I', content, 3)[0] + if cs == 16: # sRGB + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: + mode = 'L' + elif nc == 3: + mode = 'RGB' + elif nc == 4: + mode = 'RGBA' + break + elif cs == 17: # grayscale + if nc == 1 and (bpc & 0x7f) > 8: + mode = 'I;16' + elif nc == 1: + mode = 'L' + elif nc == 2: + mode = 'LA' + break + elif cs == 18: # sYCC + if nc == 3: + mode = 'RGB' + elif nc == 4: + mode = 'RGBA' + break + + if size is None or mode is None: + raise SyntaxError("Malformed jp2 header") + + return (size, mode) + +## +# Image plugin for JPEG2000 images. 
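A minimal round-trip sketch for this plugin, assuming Pillow was built with OpenJPEG support; the file names are placeholders, and the keyword arguments shown are the ones that _save (further below) reads from encoderinfo:

from PIL import Image

im = Image.open("input.jp2")            # raw codestream (.j2k) or JP2 container
print(im.format, im.mode, im.size)      # e.g. JPEG2000 RGB (640, 480)

im.save(
    "output.jp2",
    quality_mode="dB",                  # "rates" (the default) or "dB"
    quality_layers=[40],                # one quality layer
    irreversible=True,                  # lossy wavelet transform instead of the lossless default
    progression="LRCP",
)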
+ + +class Jpeg2KImageFile(ImageFile.ImageFile): + format = "JPEG2000" + format_description = "JPEG 2000 (ISO 15444)" + + def _open(self): + sig = self.fp.read(4) + if sig == b'\xff\x4f\xff\x51': + self.codec = "j2k" + self.size, self.mode = _parse_codestream(self.fp) + else: + sig = sig + self.fp.read(8) + + if sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a': + self.codec = "jp2" + self.size, self.mode = _parse_jp2_header(self.fp) + else: + raise SyntaxError('not a JPEG 2000 file') + + if self.size is None or self.mode is None: + raise SyntaxError('unable to determine size/mode') + + self.reduce = 0 + self.layers = 0 + + fd = -1 + length = -1 + + try: + fd = self.fp.fileno() + length = os.fstat(fd).st_size + except: + fd = -1 + try: + pos = self.fp.tell() + self.fp.seek(0, 2) + length = self.fp.tell() + self.fp.seek(pos, 0) + except: + length = -1 + + self.tile = [('jpeg2k', (0, 0) + self.size, 0, + (self.codec, self.reduce, self.layers, fd, length))] + + def load(self): + if self.reduce: + power = 1 << self.reduce + adjust = power >> 1 + self.size = (int((self.size[0] + adjust) / power), + int((self.size[1] + adjust) / power)) + + if self.tile: + # Update the reduce and layers settings + t = self.tile[0] + t3 = (t[3][0], self.reduce, self.layers, t[3][3], t[3][4]) + self.tile = [(t[0], (0, 0) + self.size, t[2], t3)] + + return ImageFile.ImageFile.load(self) + + +def _accept(prefix): + return (prefix[:4] == b'\xff\x4f\xff\x51' or + prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a') + + +# ------------------------------------------------------------ +# Save support + +def _save(im, fp, filename): + if filename.endswith('.j2k'): + kind = 'j2k' + else: + kind = 'jp2' + + # Get the keyword arguments + info = im.encoderinfo + + offset = info.get('offset', None) + tile_offset = info.get('tile_offset', None) + tile_size = info.get('tile_size', None) + quality_mode = info.get('quality_mode', 'rates') + quality_layers = info.get('quality_layers', None) + num_resolutions = info.get('num_resolutions', 0) + cblk_size = info.get('codeblock_size', None) + precinct_size = info.get('precinct_size', None) + irreversible = info.get('irreversible', False) + progression = info.get('progression', 'LRCP') + cinema_mode = info.get('cinema_mode', 'no') + fd = -1 + + if hasattr(fp, "fileno"): + try: + fd = fp.fileno() + except: + fd = -1 + + im.encoderconfig = ( + offset, + tile_offset, + tile_size, + quality_mode, + quality_layers, + num_resolutions, + cblk_size, + precinct_size, + irreversible, + progression, + cinema_mode, + fd + ) + + ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)]) + +# ------------------------------------------------------------ +# Registry stuff + + +Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept) +Image.register_save(Jpeg2KImageFile.format, _save) + +Image.register_extensions(Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]) + +Image.register_mime(Jpeg2KImageFile.format, 'image/jp2') +Image.register_mime(Jpeg2KImageFile.format, 'image/jpx') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..655a48ed03e2fc7b9121acada6aa145d65ff6586 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/Jpeg2KImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.py 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..97ef834d3d4157b41073f3bbb95faeac9ad22b67 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.py @@ -0,0 +1,804 @@ +# +# The Python Imaging Library. +# $Id$ +# +# JPEG (JFIF) file handling +# +# See "Digital Compression and Coding of Continuous-Tone Still Images, +# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) +# +# History: +# 1995-09-09 fl Created +# 1995-09-13 fl Added full parser +# 1996-03-25 fl Added hack to use the IJG command line utilities +# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug +# 1996-05-28 fl Added draft support, JFIF version (0.1) +# 1996-12-30 fl Added encoder options, added progression property (0.2) +# 1997-08-27 fl Save mode 1 images as BW (0.3) +# 1998-07-12 fl Added YCbCr to draft and save methods (0.4) +# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) +# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) +# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) +# 2003-04-25 fl Added experimental EXIF decoder (0.5) +# 2003-06-06 fl Added experimental EXIF GPSinfo decoder +# 2003-09-13 fl Extract COM markers +# 2009-09-06 fl Added icc_profile support (from Florian Hoech) +# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6) +# 2009-03-08 fl Added subsampling support (from Justin Huff). +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import print_function + +import array +import struct +import io +import warnings +from . import Image, ImageFile, TiffImagePlugin +from ._binary import i8, o8, i16be as i16 +from .JpegPresets import presets +from ._util import isStringType + +__version__ = "0.6" + + +# +# Parser + +def Skip(self, marker): + n = i16(self.fp.read(2))-2 + ImageFile._safe_read(self.fp, n) + + +def APP(self, marker): + # + # Application marker. Store these in the APP dictionary. + # Also look for well-known application markers. + + n = i16(self.fp.read(2))-2 + s = ImageFile._safe_read(self.fp, n) + + app = "APP%d" % (marker & 15) + + self.app[app] = s # compatibility + self.applist.append((app, s)) + + if marker == 0xFFE0 and s[:4] == b"JFIF": + # extract JFIF information + self.info["jfif"] = version = i16(s, 5) # version + self.info["jfif_version"] = divmod(version, 256) + # extract JFIF properties + try: + jfif_unit = i8(s[7]) + jfif_density = i16(s, 8), i16(s, 10) + except: + pass + else: + if jfif_unit == 1: + self.info["dpi"] = jfif_density + self.info["jfif_unit"] = jfif_unit + self.info["jfif_density"] = jfif_density + elif marker == 0xFFE1 and s[:5] == b"Exif\0": + if "exif" not in self.info: + # extract Exif information (incomplete) + self.info["exif"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:5] == b"FPXR\0": + # extract FlashPix information (incomplete) + self.info["flashpix"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0": + # Since an ICC profile can be larger than the maximum size of + # a JPEG marker (64K), we need provisions to split it into + # multiple markers. 
The format defined by the ICC specifies + # one or more APP2 markers containing the following data: + # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) + # Marker sequence number 1, 2, etc (1 byte) + # Number of markers Total of APP2's used (1 byte) + # Profile data (remainder of APP2 data) + # Decoders should use the marker sequence numbers to + # reassemble the profile, rather than assuming that the APP2 + # markers appear in the correct sequence. + self.icclist.append(s) + elif marker == 0xFFEE and s[:5] == b"Adobe": + self.info["adobe"] = i16(s, 5) + # extract Adobe custom properties + try: + adobe_transform = i8(s[1]) + except: + pass + else: + self.info["adobe_transform"] = adobe_transform + elif marker == 0xFFE2 and s[:4] == b"MPF\0": + # extract MPO information + self.info["mp"] = s[4:] + # offset is current location minus buffer size + # plus constant header size + self.info["mpoffset"] = self.fp.tell() - n + 4 + + # If DPI isn't in JPEG header, fetch from EXIF + if "dpi" not in self.info and "exif" in self.info: + try: + exif = self._getexif() + resolution_unit = exif[0x0128] + x_resolution = exif[0x011A] + try: + dpi = x_resolution[0] / x_resolution[1] + except TypeError: + dpi = x_resolution + if resolution_unit == 3: # cm + # 1 dpcm = 2.54 dpi + dpi *= 2.54 + self.info["dpi"] = dpi, dpi + except (KeyError, SyntaxError, ZeroDivisionError): + # SyntaxError for invalid/unreadable exif + # KeyError for dpi not included + # ZeroDivisionError for invalid dpi rational value + self.info["dpi"] = 72, 72 + + +def COM(self, marker): + # + # Comment marker. Store these in the APP dictionary. + n = i16(self.fp.read(2))-2 + s = ImageFile._safe_read(self.fp, n) + + self.app["COM"] = s # compatibility + self.applist.append(("COM", s)) + + +def SOF(self, marker): + # + # Start of frame marker. Defines the size and mode of the + # image. JPEG is colour blind, so we use some simple + # heuristics to map the number of layers to an appropriate + # mode. Note that this could be made a bit brighter, by + # looking for JFIF and Adobe APP markers. + + n = i16(self.fp.read(2))-2 + s = ImageFile._safe_read(self.fp, n) + self.size = i16(s[3:]), i16(s[1:]) + + self.bits = i8(s[0]) + if self.bits != 8: + raise SyntaxError("cannot handle %d-bit layers" % self.bits) + + self.layers = i8(s[5]) + if self.layers == 1: + self.mode = "L" + elif self.layers == 3: + self.mode = "RGB" + elif self.layers == 4: + self.mode = "CMYK" + else: + raise SyntaxError("cannot handle %d-layer images" % self.layers) + + if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: + self.info["progressive"] = self.info["progression"] = 1 + + if self.icclist: + # fixup icc profile + self.icclist.sort() # sort by sequence number + if i8(self.icclist[0][13]) == len(self.icclist): + profile = [] + for p in self.icclist: + profile.append(p[14:]) + icc_profile = b"".join(profile) + else: + icc_profile = None # wrong number of fragments + self.info["icc_profile"] = icc_profile + self.icclist = None + + for i in range(6, len(s), 3): + t = s[i:i+3] + # 4-tuples: id, vsamp, hsamp, qtable + self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2]))) + + +def DQT(self, marker): + # + # Define quantization table. Support baseline 8-bit tables + # only. Note that there might be more than one table in + # each marker. + + # FIXME: The quantization tables can be used to estimate the + # compression quality. 
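    # For reference, the tables parsed here end up in self.quantization, which
    # is also described in JpegPresets.py further down in this diff.  A
    # commented sketch (file name purely illustrative):
    #
    #     >>> from PIL import Image
    #     >>> im = Image.open("photo.jpg")
    #     >>> im.quantization              # dict: table id -> array of 64 values
    #     {0: array('B', [3, 2, 2, ...]), 1: array('B', [3, 3, 3, ...])}
    #     >>> im.save("copy.jpg", qtables=im.quantization, subsampling="keep")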
+ + n = i16(self.fp.read(2))-2 + s = ImageFile._safe_read(self.fp, n) + while len(s): + if len(s) < 65: + raise SyntaxError("bad quantization table marker") + v = i8(s[0]) + if v//16 == 0: + self.quantization[v & 15] = array.array("B", s[1:65]) + s = s[65:] + else: + return # FIXME: add code to read 16-bit tables! + # raise SyntaxError, "bad quantization table element size" + + +# +# JPEG marker table + +MARKER = { + 0xFFC0: ("SOF0", "Baseline DCT", SOF), + 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), + 0xFFC2: ("SOF2", "Progressive DCT", SOF), + 0xFFC3: ("SOF3", "Spatial lossless", SOF), + 0xFFC4: ("DHT", "Define Huffman table", Skip), + 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), + 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), + 0xFFC7: ("SOF7", "Differential spatial", SOF), + 0xFFC8: ("JPG", "Extension", None), + 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), + 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), + 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), + 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), + 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), + 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), + 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), + 0xFFD0: ("RST0", "Restart 0", None), + 0xFFD1: ("RST1", "Restart 1", None), + 0xFFD2: ("RST2", "Restart 2", None), + 0xFFD3: ("RST3", "Restart 3", None), + 0xFFD4: ("RST4", "Restart 4", None), + 0xFFD5: ("RST5", "Restart 5", None), + 0xFFD6: ("RST6", "Restart 6", None), + 0xFFD7: ("RST7", "Restart 7", None), + 0xFFD8: ("SOI", "Start of image", None), + 0xFFD9: ("EOI", "End of image", None), + 0xFFDA: ("SOS", "Start of scan", Skip), + 0xFFDB: ("DQT", "Define quantization table", DQT), + 0xFFDC: ("DNL", "Define number of lines", Skip), + 0xFFDD: ("DRI", "Define restart interval", Skip), + 0xFFDE: ("DHP", "Define hierarchical progression", SOF), + 0xFFDF: ("EXP", "Expand reference component", Skip), + 0xFFE0: ("APP0", "Application segment 0", APP), + 0xFFE1: ("APP1", "Application segment 1", APP), + 0xFFE2: ("APP2", "Application segment 2", APP), + 0xFFE3: ("APP3", "Application segment 3", APP), + 0xFFE4: ("APP4", "Application segment 4", APP), + 0xFFE5: ("APP5", "Application segment 5", APP), + 0xFFE6: ("APP6", "Application segment 6", APP), + 0xFFE7: ("APP7", "Application segment 7", APP), + 0xFFE8: ("APP8", "Application segment 8", APP), + 0xFFE9: ("APP9", "Application segment 9", APP), + 0xFFEA: ("APP10", "Application segment 10", APP), + 0xFFEB: ("APP11", "Application segment 11", APP), + 0xFFEC: ("APP12", "Application segment 12", APP), + 0xFFED: ("APP13", "Application segment 13", APP), + 0xFFEE: ("APP14", "Application segment 14", APP), + 0xFFEF: ("APP15", "Application segment 15", APP), + 0xFFF0: ("JPG0", "Extension 0", None), + 0xFFF1: ("JPG1", "Extension 1", None), + 0xFFF2: ("JPG2", "Extension 2", None), + 0xFFF3: ("JPG3", "Extension 3", None), + 0xFFF4: ("JPG4", "Extension 4", None), + 0xFFF5: ("JPG5", "Extension 5", None), + 0xFFF6: ("JPG6", "Extension 6", None), + 0xFFF7: ("JPG7", "Extension 7", None), + 0xFFF8: ("JPG8", "Extension 8", None), + 0xFFF9: ("JPG9", "Extension 9", None), + 0xFFFA: ("JPG10", "Extension 10", None), + 0xFFFB: ("JPG11", "Extension 11", None), + 0xFFFC: ("JPG12", "Extension 12", None), + 0xFFFD: ("JPG13", "Extension 13", None), + 0xFFFE: ("COM", "Comment", COM) +} + + +def _accept(prefix): + return prefix[0:1] == b"\377" + + +## +# Image plugin for JPEG and JFIF images. 
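Taken together, the marker handlers above populate the info dictionary of an opened JPEG, and _save (further below) reads its options from encoderinfo. A short sketch of both directions; the file name is a placeholder and the option values are only examples:

from PIL import Image

im = Image.open("photo.jpg")                     # hypothetical input
print(im.size, im.mode)
print(im.info.get("jfif_version"))               # from the APP0/JFIF handler
print(im.info.get("dpi"))                        # JFIF density, or EXIF fallback
print("progressive" in im.info)                  # set for progressive SOF markers

im.save(
    "out.jpg",
    quality=85,                                  # integer, or a JpegPresets name
    optimize=True,
    progressive=True,
    dpi=(300, 300),
    icc_profile=im.info.get("icc_profile", b""),
    exif=im.info.get("exif", b""),
)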
+ +class JpegImageFile(ImageFile.ImageFile): + + format = "JPEG" + format_description = "JPEG (ISO 10918)" + + def _open(self): + + s = self.fp.read(1) + + if i8(s) != 255: + raise SyntaxError("not a JPEG file") + + # Create attributes + self.bits = self.layers = 0 + + # JPEG specifics (internal) + self.layer = [] + self.huffman_dc = {} + self.huffman_ac = {} + self.quantization = {} + self.app = {} # compatibility + self.applist = [] + self.icclist = [] + + while True: + + i = i8(s) + if i == 0xFF: + s = s + self.fp.read(1) + i = i16(s) + else: + # Skip non-0xFF junk + s = self.fp.read(1) + continue + + if i in MARKER: + name, description, handler = MARKER[i] + # print(hex(i), name, description) + if handler is not None: + handler(self, i) + if i == 0xFFDA: # start of scan + rawmode = self.mode + if self.mode == "CMYK": + rawmode = "CMYK;I" # assume adobe conventions + self.tile = [("jpeg", (0, 0) + self.size, 0, + (rawmode, ""))] + # self.__offset = self.fp.tell() + break + s = self.fp.read(1) + elif i == 0 or i == 0xFFFF: + # padded marker or junk; move on + s = b"\xff" + elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) + s = self.fp.read(1) + else: + raise SyntaxError("no marker found") + + def load_read(self, read_bytes): + """ + internal: read more image data + For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker + so libjpeg can finish decoding + """ + s = self.fp.read(read_bytes) + + if not s and ImageFile.LOAD_TRUNCATED_IMAGES: + # Premature EOF. + # Pretend file is finished adding EOI marker + return b"\xFF\xD9" + + return s + + def draft(self, mode, size): + + if len(self.tile) != 1: + return + + # Protect from second call + if self.decoderconfig: + return + + d, e, o, a = self.tile[0] + scale = 0 + + if a[0] == "RGB" and mode in ["L", "YCbCr"]: + self.mode = mode + a = mode, "" + + if size: + scale = min(self.size[0] // size[0], self.size[1] // size[1]) + for s in [8, 4, 2, 1]: + if scale >= s: + break + e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1] + self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s) + scale = s + + self.tile = [(d, e, o, a)] + self.decoderconfig = (scale, 0) + + return self + + def load_djpeg(self): + + # ALTERNATIVE: handle JPEGs via the IJG command line utilities + + import subprocess + import tempfile + import os + f, path = tempfile.mkstemp() + os.close(f) + if os.path.exists(self.filename): + subprocess.check_call(["djpeg", "-outfile", path, self.filename]) + else: + raise ValueError("Invalid Filename") + + try: + _im = Image.open(path) + _im.load() + self.im = _im.im + finally: + try: + os.unlink(path) + except OSError: + pass + + self.mode = self.im.mode + self.size = self.im.size + + self.tile = [] + + def _getexif(self): + return _getexif(self) + + def _getmp(self): + return _getmp(self) + + +def _fixup_dict(src_dict): + # Helper function for _getexif() + # returns a dict with any single item tuples/lists as individual values + def _fixup(value): + try: + if len(value) == 1 and not isinstance(value, dict): + return value[0] + except: + pass + return value + + return {k: _fixup(v) for k, v in src_dict.items()} + + +def _getexif(self): + # Extract EXIF information. This method is highly experimental, + # and is likely to be replaced with something better in a future + # version. + + # The EXIF record consists of a TIFF file embedded in a JPEG + # application marker (!). 
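    # A commented usage sketch (file name illustrative): the integer tag ids
    # returned by this helper can be mapped to readable names with
    # PIL.ExifTags, e.g.
    #
    #     >>> from PIL import Image, ExifTags
    #     >>> exif = Image.open("photo.jpg")._getexif() or {}
    #     >>> {ExifTags.TAGS.get(k, k): v for k, v in exif.items()}
    #     {'Orientation': 1, 'Make': 'ExampleCam', ...}   # values illustrative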
+ try: + data = self.info["exif"] + except KeyError: + return None + file = io.BytesIO(data[6:]) + head = file.read(8) + # process dictionary + info = TiffImagePlugin.ImageFileDirectory_v1(head) + info.load(file) + exif = dict(_fixup_dict(info)) + # get exif extension + try: + # exif field 0x8769 is an offset pointer to the location + # of the nested embedded exif ifd. + # It should be a long, but may be corrupted. + file.seek(exif[0x8769]) + except (KeyError, TypeError): + pass + else: + info = TiffImagePlugin.ImageFileDirectory_v1(head) + info.load(file) + exif.update(_fixup_dict(info)) + # get gpsinfo extension + try: + # exif field 0x8825 is an offset pointer to the location + # of the nested embedded gps exif ifd. + # It should be a long, but may be corrupted. + file.seek(exif[0x8825]) + except (KeyError, TypeError): + pass + else: + info = TiffImagePlugin.ImageFileDirectory_v1(head) + info.load(file) + exif[0x8825] = _fixup_dict(info) + + return exif + + +def _getmp(self): + # Extract MP information. This method was inspired by the "highly + # experimental" _getexif version that's been in use for years now, + # itself based on the ImageFileDirectory class in the TIFF plug-in. + + # The MP record essentially consists of a TIFF file embedded in a JPEG + # application marker. + try: + data = self.info["mp"] + except KeyError: + return None + file_contents = io.BytesIO(data) + head = file_contents.read(8) + endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<' + # process dictionary + try: + info = TiffImagePlugin.ImageFileDirectory_v2(head) + info.load(file_contents) + mp = dict(info) + except: + raise SyntaxError("malformed MP Index (unreadable directory)") + # it's an error not to have a number of images + try: + quant = mp[0xB001] + except KeyError: + raise SyntaxError("malformed MP Index (no number of images)") + # get MP entries + mpentries = [] + try: + rawmpentries = mp[0xB002] + for entrynum in range(0, quant): + unpackedentry = struct.unpack_from( + '{}LLLHH'.format(endianness), rawmpentries, entrynum * 16) + labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1', + 'EntryNo2') + mpentry = dict(zip(labels, unpackedentry)) + mpentryattr = { + 'DependentParentImageFlag': bool(mpentry['Attribute'] & + (1 << 31)), + 'DependentChildImageFlag': bool(mpentry['Attribute'] & + (1 << 30)), + 'RepresentativeImageFlag': bool(mpentry['Attribute'] & + (1 << 29)), + 'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27, + 'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24, + 'MPType': mpentry['Attribute'] & 0x00FFFFFF + } + if mpentryattr['ImageDataFormat'] == 0: + mpentryattr['ImageDataFormat'] = 'JPEG' + else: + raise SyntaxError("unsupported picture format in MPO") + mptypemap = { + 0x000000: 'Undefined', + 0x010001: 'Large Thumbnail (VGA Equivalent)', + 0x010002: 'Large Thumbnail (Full HD Equivalent)', + 0x020001: 'Multi-Frame Image (Panorama)', + 0x020002: 'Multi-Frame Image: (Disparity)', + 0x020003: 'Multi-Frame Image: (Multi-Angle)', + 0x030000: 'Baseline MP Primary Image' + } + mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'], + 'Unknown') + mpentry['Attribute'] = mpentryattr + mpentries.append(mpentry) + mp[0xB002] = mpentries + except KeyError: + raise SyntaxError("malformed MP Index (bad MP Entry)") + # Next we should try and parse the individual image unique ID list; + # we don't because I've never seen this actually used in a real MPO + # file and so can't test it. 
+ return mp + + +# -------------------------------------------------------------------- +# stuff to save JPEG files + +RAWMODE = { + "1": "L", + "L": "L", + "RGB": "RGB", + "RGBX": "RGB", + "CMYK": "CMYK;I", # assume adobe conventions + "YCbCr": "YCbCr", +} + +zigzag_index = (0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63) + +samplings = {(1, 1, 1, 1, 1, 1): 0, + (2, 1, 1, 1, 1, 1): 1, + (2, 2, 1, 1, 1, 1): 2, + } + + +def convert_dict_qtables(qtables): + qtables = [qtables[key] for key in range(len(qtables)) if key in qtables] + for idx, table in enumerate(qtables): + qtables[idx] = [table[i] for i in zigzag_index] + return qtables + + +def get_sampling(im): + # There's no subsampling when image have only 1 layer + # (grayscale images) or when they are CMYK (4 layers), + # so set subsampling to default value. + # + # NOTE: currently Pillow can't encode JPEG to YCCK format. + # If YCCK support is added in the future, subsampling code will have + # to be updated (here and in JpegEncode.c) to deal with 4 layers. + if not hasattr(im, 'layers') or im.layers in (1, 4): + return -1 + sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] + return samplings.get(sampling, -1) + + +def _save(im, fp, filename): + + try: + rawmode = RAWMODE[im.mode] + except KeyError: + raise IOError("cannot write mode %s as JPEG" % im.mode) + + info = im.encoderinfo + + dpi = [int(round(x)) for x in info.get("dpi", (0, 0))] + + quality = info.get("quality", 0) + subsampling = info.get("subsampling", -1) + qtables = info.get("qtables") + + if quality == "keep": + quality = 0 + subsampling = "keep" + qtables = "keep" + elif quality in presets: + preset = presets[quality] + quality = 0 + subsampling = preset.get('subsampling', -1) + qtables = preset.get('quantization') + elif not isinstance(quality, int): + raise ValueError("Invalid quality setting") + else: + if subsampling in presets: + subsampling = presets[subsampling].get('subsampling', -1) + if isStringType(qtables) and qtables in presets: + qtables = presets[qtables].get('quantization') + + if subsampling == "4:4:4": + subsampling = 0 + elif subsampling == "4:2:2": + subsampling = 1 + elif subsampling == "4:2:0": + subsampling = 2 + elif subsampling == "4:1:1": + # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. + # Set 4:2:0 if someone is still using that value. 
+ subsampling = 2 + elif subsampling == "keep": + if im.format != "JPEG": + raise ValueError( + "Cannot use 'keep' when original image is not a JPEG") + subsampling = get_sampling(im) + + def validate_qtables(qtables): + if qtables is None: + return qtables + if isStringType(qtables): + try: + lines = [int(num) for line in qtables.splitlines() + for num in line.split('#', 1)[0].split()] + except ValueError: + raise ValueError("Invalid quantization table") + else: + qtables = [lines[s:s+64] for s in range(0, len(lines), 64)] + if isinstance(qtables, (tuple, list, dict)): + if isinstance(qtables, dict): + qtables = convert_dict_qtables(qtables) + elif isinstance(qtables, tuple): + qtables = list(qtables) + if not (0 < len(qtables) < 5): + raise ValueError("None or too many quantization tables") + for idx, table in enumerate(qtables): + try: + if len(table) != 64: + raise TypeError + table = array.array('B', table) + except TypeError: + raise ValueError("Invalid quantization table") + else: + qtables[idx] = list(table) + return qtables + + if qtables == "keep": + if im.format != "JPEG": + raise ValueError( + "Cannot use 'keep' when original image is not a JPEG") + qtables = getattr(im, "quantization", None) + qtables = validate_qtables(qtables) + + extra = b"" + + icc_profile = info.get("icc_profile") + if icc_profile: + ICC_OVERHEAD_LEN = 14 + MAX_BYTES_IN_MARKER = 65533 + MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN + markers = [] + while icc_profile: + markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER]) + icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:] + i = 1 + for marker in markers: + size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker)) + extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + + o8(len(markers)) + marker) + i += 1 + + # "progressive" is the official name, but older documentation + # says "progression" + # FIXME: issue a warning if the wrong form is used (post-1.1.7) + progressive = (info.get("progressive", False) or + info.get("progression", False)) + + optimize = info.get("optimize", False) + + # get keyword arguments + im.encoderconfig = ( + quality, + progressive, + info.get("smooth", 0), + optimize, + info.get("streamtype", 0), + dpi[0], dpi[1], + subsampling, + qtables, + extra, + info.get("exif", b"") + ) + + # if we optimize, libjpeg needs a buffer big enough to hold the whole image + # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is + # channels*size, this is a value that's been used in a django patch. + # https://github.com/matthewwithanm/django-imagekit/issues/50 + bufsize = 0 + if optimize or progressive: + # CMYK can be bigger + if im.mode == 'CMYK': + bufsize = 4 * im.size[0] * im.size[1] + # keep sets quality to 0, but the actual value may be high. + elif quality >= 95 or quality == 0: + bufsize = 2 * im.size[0] * im.size[1] + else: + bufsize = im.size[0] * im.size[1] + + # The exif info needs to be written as one block, + APP1, + one spare byte. + # Ensure that our buffer is big enough. Same with the icc_profile block. + bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5, + len(extra) + 1) + + ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize) + + +def _save_cjpeg(im, fp, filename): + # ALTERNATIVE: handle JPEGs via the IJG command line utilities. 
+ import os + import subprocess + tempfile = im._dump() + subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) + try: + os.unlink(tempfile) + except OSError: + pass + + +## +# Factory for making JPEG and MPO instances +def jpeg_factory(fp=None, filename=None): + im = JpegImageFile(fp, filename) + try: + mpheader = im._getmp() + if mpheader[45057] > 1: + # It's actually an MPO + from .MpoImagePlugin import MpoImageFile + im = MpoImageFile(fp, filename) + except (TypeError, IndexError): + # It is really a JPEG + pass + except SyntaxError: + warnings.warn("Image appears to be a malformed MPO file, it will be " + "interpreted as a base JPEG file") + return im + + +# -------------------------------------------------------------------q- +# Registry stuff + +Image.register_open(JpegImageFile.format, jpeg_factory, _accept) +Image.register_save(JpegImageFile.format, _save) + +Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) + +Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8425f20be151a6a434e9eaa31cbf8bd3abcfc37d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.py new file mode 100644 index 0000000000000000000000000000000000000000..5f01f0d2d1ee19188220e9d61b02f98b8f2ebd4e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.py @@ -0,0 +1,241 @@ +""" +JPEG quality settings equivalent to the Photoshop settings. + +More presets can be added to the presets dict if needed. + +Can be use when saving JPEG file. + +To apply the preset, specify:: + + quality="preset_name" + +To apply only the quantization table:: + + qtables="preset_name" + +To apply only the subsampling setting:: + + subsampling="preset_name" + +Example:: + + im.save("image_name.jpg", quality="web_high") + + +Subsampling +----------- + +Subsampling is the practice of encoding images by implementing less resolution +for chroma information than for luma information. +(ref.: https://en.wikipedia.org/wiki/Chroma_subsampling) + +Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and +4:2:0. + +You can get the subsampling of a JPEG with the +`JpegImagePlugin.get_subsampling(im)` function. + + +Quantization tables +------------------- + +They are values use by the DCT (Discrete cosine transform) to remove +*unnecessary* information from the image (the lossy part of the compression). +(ref.: https://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices, +https://en.wikipedia.org/wiki/JPEG#Quantization) + +You can get the quantization tables of a JPEG with:: + + im.quantization + +This will return a dict with a number of arrays. You can pass this dict +directly as the qtables argument when saving a JPEG. + +The tables format between im.quantization and quantization in presets differ in +3 ways: + +1. The base container of the preset is a list with sublists instead of dict. + dict[0] -> list[0], dict[1] -> list[1], ... +2. Each table in a preset is a list instead of an array. +3. The zigzag order is remove in the preset (needed by libjpeg >= 6a). 
+ +You can convert the dict format to the preset format with the +`JpegImagePlugin.convert_dict_qtables(dict_qtables)` function. + +Libjpeg ref.: https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html + +""" + +presets = { + 'web_low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [20, 16, 25, 39, 50, 46, 62, 68, + 16, 18, 23, 38, 38, 53, 65, 68, + 25, 23, 31, 38, 53, 65, 68, 68, + 39, 38, 38, 53, 65, 68, 68, 68, + 50, 38, 53, 65, 68, 68, 68, 68, + 46, 53, 65, 68, 68, 68, 68, 68, + 62, 65, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68], + [21, 25, 32, 38, 54, 68, 68, 68, + 25, 28, 24, 38, 54, 68, 68, 68, + 32, 24, 32, 43, 66, 68, 68, 68, + 38, 38, 43, 53, 68, 68, 68, 68, + 54, 54, 66, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68] + ]}, + 'web_medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [16, 11, 11, 16, 23, 27, 31, 30, + 11, 12, 12, 15, 20, 23, 23, 30, + 11, 12, 13, 16, 23, 26, 35, 47, + 16, 15, 16, 23, 26, 37, 47, 64, + 23, 20, 23, 26, 39, 51, 64, 64, + 27, 23, 26, 37, 51, 64, 64, 64, + 31, 23, 35, 47, 64, 64, 64, 64, + 30, 30, 47, 64, 64, 64, 64, 64], + [17, 15, 17, 21, 20, 26, 38, 48, + 15, 19, 18, 17, 20, 26, 35, 43, + 17, 18, 20, 22, 26, 30, 46, 53, + 21, 17, 22, 28, 30, 39, 53, 64, + 20, 20, 26, 30, 39, 48, 64, 64, + 26, 26, 30, 39, 48, 63, 64, 64, + 38, 35, 46, 53, 64, 64, 64, 64, + 48, 43, 53, 64, 64, 64, 64, 64] + ]}, + 'web_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 14, 19, + 6, 6, 6, 11, 12, 15, 19, 28, + 9, 8, 10, 12, 16, 20, 27, 31, + 11, 10, 12, 15, 20, 27, 31, 31, + 12, 12, 14, 19, 27, 31, 31, 31, + 16, 12, 19, 28, 31, 31, 31, 31], + [7, 7, 13, 24, 26, 31, 31, 31, + 7, 12, 16, 21, 31, 31, 31, 31, + 13, 16, 17, 31, 31, 31, 31, 31, + 24, 21, 31, 31, 31, 31, 31, 31, + 26, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31] + ]}, + 'web_very_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 11, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 11, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'web_maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 1, 1, 2, 2, 3, + 1, 1, 1, 1, 2, 2, 3, 3, + 1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 2, 2, 3, 3, 3, 3], + [1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 1, 2, 3, 3, 3, 3, + 1, 1, 1, 3, 3, 3, 3, 3, + 2, 2, 3, 3, 3, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3] + ]}, + 'low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [18, 14, 14, 21, 30, 35, 34, 17, + 14, 16, 16, 19, 26, 23, 12, 12, + 14, 16, 17, 21, 23, 12, 12, 12, + 21, 19, 21, 23, 12, 12, 12, 12, + 30, 26, 23, 12, 12, 12, 12, 12, + 35, 23, 12, 12, 12, 12, 12, 12, + 34, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [20, 19, 22, 27, 20, 20, 17, 17, + 19, 25, 23, 14, 14, 12, 12, 12, + 22, 23, 
14, 14, 12, 12, 12, 12, + 27, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [12, 8, 8, 12, 17, 21, 24, 17, + 8, 9, 9, 11, 15, 19, 12, 12, + 8, 9, 10, 12, 19, 12, 12, 12, + 12, 11, 12, 21, 12, 12, 12, 12, + 17, 15, 19, 12, 12, 12, 12, 12, + 21, 19, 12, 12, 12, 12, 12, 12, + 24, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [13, 11, 13, 16, 20, 20, 17, 17, + 11, 14, 14, 14, 14, 12, 12, 12, + 13, 14, 14, 14, 12, 12, 12, 12, + 16, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 12, 12, + 6, 6, 6, 11, 12, 12, 12, 12, + 9, 8, 10, 12, 12, 12, 12, 12, + 11, 10, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 16, 12, 12, 12, 12, 12, 12, 12], + [7, 7, 13, 24, 20, 20, 17, 17, + 7, 12, 16, 14, 14, 12, 12, 12, + 13, 16, 14, 14, 12, 12, 12, 12, + 24, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 10, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 10, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, +} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d608137cebbf7a165d90a6a255cf96e537364ef3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/JpegPresets.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..06da33f7778971e5374644eb5cbe2a37f97e109c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Basic McIdas support for PIL +# +# History: +# 1997-05-05 fl Created (8-bit images only) +# 2009-03-08 fl Added 16/32-bit support. +# +# Thanks to Richard Jones and Craig Swank for specs and samples. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +import struct +from . import Image, ImageFile + +__version__ = "0.2" + + +def _accept(s): + return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04" + + +## +# Image plugin for McIdas area images. 
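The _open method below reads the 256-byte area directory as 64 big-endian words and derives the image shape and element size from a few of them. A standalone sketch of that header peek; the file name is a placeholder and the word meanings are taken from the code below rather than from the full McIDAS specification:

import struct

with open("area_file", "rb") as f:               # hypothetical McIdas area file
    s = f.read(256)

assert s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"   # the same magic _accept checks

w = [0] + list(struct.unpack("!64i", s))         # 1-based indexing, as in _open
lines, elements, data_size = w[9], w[10], w[11]  # height, width, bytes per element
print((elements, lines), data_size)              # (width, height) and 1, 2 or 4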
+ +class McIdasImageFile(ImageFile.ImageFile): + + format = "MCIDAS" + format_description = "McIdas area file" + + def _open(self): + + # parse area file directory + s = self.fp.read(256) + if not _accept(s) or len(s) != 256: + raise SyntaxError("not an McIdas area file") + + self.area_descriptor_raw = s + self.area_descriptor = w = [0] + list(struct.unpack("!64i", s)) + + # get mode + if w[11] == 1: + mode = rawmode = "L" + elif w[11] == 2: + # FIXME: add memory map support + mode = "I" + rawmode = "I;16B" + elif w[11] == 4: + # FIXME: add memory map support + mode = "I" + rawmode = "I;32B" + else: + raise SyntaxError("unsupported McIdas format") + + self.mode = mode + self.size = w[10], w[9] + + offset = w[34] + w[15] + stride = w[15] + w[10]*w[11]*w[14] + + self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] + + +# -------------------------------------------------------------------- +# registry + +Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) + +# no default extension diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f870d3e005fc9c55b6a51bc5cdf045e337cfdfe9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/McIdasImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..1dbb6a588e15b17339b135260f0b41cb572683f7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.py @@ -0,0 +1,107 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Microsoft Image Composer support for PIL +# +# Notes: +# uses TiffImagePlugin.py to read the actual image streams +# +# History: +# 97-01-20 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, TiffImagePlugin + +import olefile + +__version__ = "0.1" + + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for Microsoft's Image Composer file format. + +class MicImageFile(TiffImagePlugin.TiffImageFile): + + format = "MIC" + format_description = "Microsoft Image Composer" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # read the OLE directory and see if this is a likely + # to be a Microsoft Image Composer file + + try: + self.ole = olefile.OleFileIO(self.fp) + except IOError: + raise SyntaxError("not an MIC file; invalid OLE file") + + # find ACI subfiles with Image members (maybe not the + # best way to identify MIC files, but what the... ;-) + + self.images = [] + for path in self.ole.listdir(): + if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image": + self.images.append(path) + + # if we didn't find any images, this is probably not + # an MIC file. 
+ if not self.images: + raise SyntaxError("not an MIC file; no image entries") + + self.__fp = self.fp + self.frame = None + + if len(self.images) > 1: + self.category = Image.CONTAINER + + self.seek(0) + + @property + def n_frames(self): + return len(self.images) + + @property + def is_animated(self): + return len(self.images) > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + try: + filename = self.images[frame] + except IndexError: + raise EOFError("no such frame") + + self.fp = self.ole.openstream(filename) + + TiffImagePlugin.TiffImageFile._open(self) + + self.frame = frame + + def tell(self): + + return self.frame + + +# +# -------------------------------------------------------------------- + +Image.register_open(MicImageFile.format, MicImageFile, _accept) + +Image.register_extension(MicImageFile.format, ".mic") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d6167efe7c31f580fa7f971379d8c44c461f4f4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MicImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..fca7f9d9fe4f8af4d6e9b17b3b96bc717538b800 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.py @@ -0,0 +1,85 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPEG file handling +# +# History: +# 95-09-09 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile +from ._binary import i8 + +__version__ = "0.1" + + +# +# Bitstream parser + +class BitStream(object): + + def __init__(self, fp): + self.fp = fp + self.bits = 0 + self.bitbuffer = 0 + + def next(self): + return i8(self.fp.read(1)) + + def peek(self, bits): + while self.bits < bits: + c = self.next() + if c < 0: + self.bits = 0 + continue + self.bitbuffer = (self.bitbuffer << 8) + c + self.bits += 8 + return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1 + + def skip(self, bits): + while self.bits < bits: + self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1)) + self.bits += 8 + self.bits = self.bits - bits + + def read(self, bits): + v = self.peek(bits) + self.bits = self.bits - bits + return v + + +## +# Image plugin for MPEG streams. This plugin can identify a stream, +# but it cannot read it. 
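Since the plugin can only identify a stream, the useful result of opening one is the frame size. A short sketch; the file name is a placeholder, and the manual parse mirrors what _open below does with BitStream (a 32-bit 0x1B3 sequence-header start code followed by two 12-bit size fields):

from PIL import Image

im = Image.open("clip.mpg")            # hypothetical MPEG stream
print(im.format, im.size)              # e.g. MPEG (720, 576); pixel data cannot be loaded

# The same two 12-bit fields, decoded by hand:
with open("clip.mpg", "rb") as f:
    hdr = f.read(7)
assert hdr[:4] == b"\x00\x00\x01\xb3"
width = (ord(hdr[4:5]) << 4) | (ord(hdr[5:6]) >> 4)
height = ((ord(hdr[5:6]) & 0x0F) << 8) | ord(hdr[6:7])
print(width, height)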
+ +class MpegImageFile(ImageFile.ImageFile): + + format = "MPEG" + format_description = "MPEG" + + def _open(self): + + s = BitStream(self.fp) + + if s.read(32) != 0x1B3: + raise SyntaxError("not an MPEG file") + + self.mode = "RGB" + self.size = s.read(12), s.read(12) + + +# -------------------------------------------------------------------- +# Registry stuff + +Image.register_open(MpegImageFile.format, MpegImageFile) + +Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"]) + +Image.register_mime(MpegImageFile.format, "video/mpeg") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7f4b0125355d8383e9679f8bcf9a65b85972944 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpegImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..460ccec27f72c688222184c25e58d35aa62def4d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.py @@ -0,0 +1,99 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPO file handling +# +# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the +# Camera & Imaging Products Association) +# +# The multi-picture object combines multiple JPEG images (with a modified EXIF +# data format) into a single file. While it can theoretically be used much like +# a GIF animation, it is commonly used to represent 3D photographs and is (as +# of this writing) the most commonly used format by 3D cameras. +# +# History: +# 2014-03-13 Feneric Created +# +# See the README file for information on usage and redistribution. +# + +from . import Image, JpegImagePlugin + +__version__ = "0.1" + + +def _accept(prefix): + return JpegImagePlugin._accept(prefix) + + +def _save(im, fp, filename): + # Note that we can only save the current frame at present + return JpegImagePlugin._save(im, fp, filename) + + +## +# Image plugin for MPO images. + +class MpoImageFile(JpegImagePlugin.JpegImageFile): + + format = "MPO" + format_description = "MPO (CIPA DC-007)" + _close_exclusive_fp_after_loading = False + + def _open(self): + self.fp.seek(0) # prep the fp in order to pass the JPEG test + JpegImagePlugin.JpegImageFile._open(self) + self.mpinfo = self._getmp() + self.__framecount = self.mpinfo[0xB001] + self.__mpoffsets = [mpent['DataOffset'] + self.info['mpoffset'] + for mpent in self.mpinfo[0xB002]] + self.__mpoffsets[0] = 0 + # Note that the following assertion will only be invalid if something + # gets broken within JpegImagePlugin. 
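        # For reference, a commented usage sketch (file name illustrative):
        # frames located here are addressed through the standard multi-frame
        # API defined just below (n_frames / seek / tell), e.g.
        #
        #     >>> im = Image.open("stereo.mpo")
        #     >>> im.n_frames
        #     2
        #     >>> im.seek(1)        # switch to the second embedded JPEG
        #     >>> im.tell()
        #     1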
+ assert self.__framecount == len(self.__mpoffsets) + del self.info['mpoffset'] # no longer needed + self.__fp = self.fp # FIXME: hack + self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame + self.__frame = 0 + self.offset = 0 + # for now we can only handle reading and individual frame extraction + self.readonly = 1 + + def load_seek(self, pos): + self.__fp.seek(pos) + + @property + def n_frames(self): + return self.__framecount + + @property + def is_animated(self): + return self.__framecount > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + self.fp = self.__fp + self.offset = self.__mpoffsets[frame] + self.tile = [ + ("jpeg", (0, 0) + self.size, self.offset, (self.mode, "")) + ] + self.__frame = frame + + def tell(self): + return self.__frame + + +# -------------------------------------------------------------------q- +# Registry stuff + +# Note that since MPO shares a factory with JPEG, we do not need to do a +# separate registration for it here. +# Image.register_open(MpoImageFile.format, +# JpegImagePlugin.jpeg_factory, _accept) +Image.register_save(MpoImageFile.format, _save) + +Image.register_extension(MpoImageFile.format, ".mpo") + +Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c493adb6a2aba4177411777e6d59fd247dca898 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MpoImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..9692d116241497f8d531bc057ef14539269cc666 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.py @@ -0,0 +1,192 @@ +# +# The Python Imaging Library. +# +# MSP file handling +# +# This is the format used by the Paint program in Windows 1 and 2. +# +# History: +# 95-09-05 fl Created +# 97-01-03 fl Read/write MSP images +# 17-02-21 es Fixed RLE interpretation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-97. +# Copyright (c) Eric Soroos 2017. +# +# See the README file for information on usage and redistribution. +# +# More info on this format: https://archive.org/details/gg243631 +# Page 313: +# Figure 205. Windows Paint Version 1: "DanM" Format +# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 +# +# See also: http://www.fileformat.info/format/mspaint/egff.htm + +from . import Image, ImageFile +from ._binary import i16le as i16, o16le as o16, i8 +import struct +import io + +__version__ = "0.1" + + +# +# read MSP files + + +def _accept(prefix): + return prefix[:4] in [b"DanM", b"LinS"] + + +## +# Image plugin for Windows MSP images. This plugin supports both +# uncompressed (Windows 1.0). 
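For the RLE variant, the MspDecoder class further down documents the packet format (a zero RunType byte introduces a count/value pair, any other RunType is a literal run of that many bytes). A standalone sketch of that scheme, checked against the worked example given in that comment:

def msp_rle_decode(row):
    # Expand one RLE-encoded scan line into raw bytes (works on Python 2 and 3).
    row = bytearray(row)
    out = bytearray()
    idx = 0
    while idx < len(row):
        runtype = row[idx]
        idx += 1
        if runtype == 0:
            runcount, runval = row[idx], row[idx + 1]
            out.extend(bytearray([runval]) * runcount)
            idx += 2
        else:
            out.extend(row[idx:idx + runtype])
            idx += runtype
    return bytes(out)

# Worked example from the MspDecoder comment below:
assert msp_rle_decode(b"\x00\x03\xff\x05\x00\x01\x02\x03\x04") == \
    b"\xff\xff\xff\x00\x01\x02\x03\x04"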
+ +class MspImageFile(ImageFile.ImageFile): + + format = "MSP" + format_description = "Windows Paint" + + def _open(self): + + # Header + s = self.fp.read(32) + if s[:4] not in [b"DanM", b"LinS"]: + raise SyntaxError("not an MSP file") + + # Header checksum + checksum = 0 + for i in range(0, 32, 2): + checksum = checksum ^ i16(s[i:i+2]) + if checksum != 0: + raise SyntaxError("bad MSP checksum") + + self.mode = "1" + self.size = i16(s[4:]), i16(s[6:]) + + if s[:4] == b"DanM": + self.tile = [("raw", (0, 0)+self.size, 32, ("1", 0, 1))] + else: + self.tile = [("MSP", (0, 0)+self.size, 32, None)] + + +class MspDecoder(ImageFile.PyDecoder): + # The algo for the MSP decoder is from + # http://www.fileformat.info/format/mspaint/egff.htm + # cc-by-attribution -- That page references is taken from the + # Encyclopedia of Graphics File Formats and is licensed by + # O'Reilly under the Creative Common/Attribution license + # + # For RLE encoded files, the 32byte header is followed by a scan + # line map, encoded as one 16bit word of encoded byte length per + # line. + # + # NOTE: the encoded length of the line can be 0. This was not + # handled in the previous version of this encoder, and there's no + # mention of how to handle it in the documentation. From the few + # examples I've seen, I've assumed that it is a fill of the + # background color, in this case, white. + # + # + # Pseudocode of the decoder: + # Read a BYTE value as the RunType + # If the RunType value is zero + # Read next byte as the RunCount + # Read the next byte as the RunValue + # Write the RunValue byte RunCount times + # If the RunType value is non-zero + # Use this value as the RunCount + # Read and write the next RunCount bytes literally + # + # e.g.: + # 0x00 03 ff 05 00 01 02 03 04 + # would yield the bytes: + # 0xff ff ff 00 01 02 03 04 + # + # which are then interpreted as a bit packed mode '1' image + + _pulls_fd = True + + def decode(self, buffer): + + img = io.BytesIO() + blank_line = bytearray((0xff,)*((self.state.xsize+7)//8)) + try: + self.fd.seek(32) + rowmap = struct.unpack_from("<%dH" % (self.state.ysize), + self.fd.read(self.state.ysize*2)) + except struct.error: + raise IOError("Truncated MSP file in row map") + + for x, rowlen in enumerate(rowmap): + try: + if rowlen == 0: + img.write(blank_line) + continue + row = self.fd.read(rowlen) + if len(row) != rowlen: + raise IOError("Truncated MSP file, expected %d bytes on row %s", + (rowlen, x)) + idx = 0 + while idx < rowlen: + runtype = i8(row[idx]) + idx += 1 + if runtype == 0: + (runcount, runval) = struct.unpack_from("Bc", row, idx) + img.write(runval * runcount) + idx += 2 + else: + runcount = runtype + img.write(row[idx:idx+runcount]) + idx += runcount + + except struct.error: + raise IOError("Corrupted MSP file in row %d" % x) + + self.set_as_raw(img.getvalue(), ("1", 0, 1)) + + return 0, 0 + + +Image.register_decoder('MSP', MspDecoder) + + +# +# write MSP files (uncompressed only) + + +def _save(im, fp, filename): + + if im.mode != "1": + raise IOError("cannot write mode %s as MSP" % im.mode) + + # create MSP header + header = [0] * 16 + + header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 + header[2], header[3] = im.size + header[4], header[5] = 1, 1 + header[6], header[7] = 1, 1 + header[8], header[9] = im.size + + checksum = 0 + for h in header: + checksum = checksum ^ h + header[12] = checksum # FIXME: is this the right field? 
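    # (annotation, not part of the vendored file) the reader's _open above
    # accepts a header only when XOR-ing all sixteen 16-bit words gives zero.
    # Since the unused slots are still zero when the running XOR is computed,
    # storing it in slot 12 makes the emitted header pass that same check;
    # whether slot 12 is the field the original Windows Paint expected is
    # unverified, hence the FIXME.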
+ + # header + for h in header: + fp.write(o16(h)) + + # image body + ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 32, ("1", 0, 1))]) + + +# +# registry + +Image.register_open(MspImageFile.format, MspImageFile, _accept) +Image.register_save(MspImageFile.format, _save) + +Image.register_extension(MspImageFile.format, ".msp") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8375115b424103cb984e3d133cdd8651582c43fd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/MspImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.py new file mode 100644 index 0000000000000000000000000000000000000000..b3caa10d5e45617f95cb261176dedcf810e327f7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.py @@ -0,0 +1,4 @@ +raise ImportError( + 'PIL.OleFileIO is deprecated. Use the olefile Python package ' + 'instead. This module will be removed in a future version.' +) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2eb3a19734c4a1181476b1c94143d78bba48ee4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/OleFileIO.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ded6fea028e32cc44cc48d3305e19fe6248c4f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.py @@ -0,0 +1,237 @@ +# +# The Python Imaging Library +# $Id$ +# +# simple postscript graphics interface +# +# History: +# 1996-04-20 fl Created +# 1999-01-10 fl Added gsave/grestore to image method +# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge) +# +# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from . import EpsImagePlugin +from ._util import py3 +import sys + +## +# Simple Postscript graphics interface. + + +class PSDraw(object): + """ + Sets up printing to the given file. If **fp** is omitted, + :py:attr:`sys.stdout` is assumed. + """ + + def __init__(self, fp=None): + if not fp: + fp = sys.stdout + self.fp = fp + + def _fp_write(self, to_write): + if not py3 or self.fp == sys.stdout: + self.fp.write(to_write) + else: + self.fp.write(bytes(to_write, 'UTF-8')) + + def begin_document(self, id=None): + """Set up printing of a document. (Write Postscript DSC header.)""" + # FIXME: incomplete + self._fp_write("%!PS-Adobe-3.0\n" + "save\n" + "/showpage { } def\n" + "%%EndComments\n" + "%%BeginDocument\n") + # self._fp_write(ERROR_PS) # debugging! + self._fp_write(EDROFF_PS) + self._fp_write(VDI_PS) + self._fp_write("%%EndProlog\n") + self.isofont = {} + + def end_document(self): + """Ends printing. 
(Write Postscript DSC footer.)""" + self._fp_write("%%EndDocument\n" + "restore showpage\n" + "%%End\n") + if hasattr(self.fp, "flush"): + self.fp.flush() + + def setfont(self, font, size): + """ + Selects which font to use. + + :param font: A Postscript font name + :param size: Size in points. + """ + if font not in self.isofont: + # reencode font + self._fp_write("/PSDraw-%s ISOLatin1Encoding /%s E\n" % + (font, font)) + self.isofont[font] = 1 + # rough + self._fp_write("/F0 %d /PSDraw-%s F\n" % (size, font)) + + def line(self, xy0, xy1): + """ + Draws a line between the two points. Coordinates are given in + Postscript point coordinates (72 points per inch, (0, 0) is the lower + left corner of the page). + """ + xy = xy0 + xy1 + self._fp_write("%d %d %d %d Vl\n" % xy) + + def rectangle(self, box): + """ + Draws a rectangle. + + :param box: A 4-tuple of integers whose order and function is currently + undocumented. + + Hint: the tuple is passed into this format string: + + .. code-block:: python + + %d %d M %d %d 0 Vr\n + """ + self._fp_write("%d %d M %d %d 0 Vr\n" % box) + + def text(self, xy, text): + """ + Draws text at the given position. You must use + :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method. + """ + text = "\\(".join(text.split("(")) + text = "\\)".join(text.split(")")) + xy = xy + (text,) + self._fp_write("%d %d M (%s) S\n" % xy) + + def image(self, box, im, dpi=None): + """Draw a PIL image, centered in the given box.""" + # default resolution depends on mode + if not dpi: + if im.mode == "1": + dpi = 200 # fax + else: + dpi = 100 # greyscale + # image size (on paper) + x = float(im.size[0] * 72) / dpi + y = float(im.size[1] * 72) / dpi + # max allowed size + xmax = float(box[2] - box[0]) + ymax = float(box[3] - box[1]) + if x > xmax: + y = y * xmax / x + x = xmax + if y > ymax: + x = x * ymax / y + y = ymax + dx = (xmax - x) / 2 + box[0] + dy = (ymax - y) / 2 + box[1] + self._fp_write("gsave\n%f %f translate\n" % (dx, dy)) + if (x, y) != im.size: + # EpsImagePlugin._save prints the image at (0,0,xsize,ysize) + sx = x / im.size[0] + sy = y / im.size[1] + self._fp_write("%f %f scale\n" % (sx, sy)) + EpsImagePlugin._save(im, self.fp, None, 0) + self._fp_write("\ngrestore\n") + +# -------------------------------------------------------------------- +# Postscript driver + +# +# EDROFF.PS -- Postscript driver for Edroff 2 +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. +# + + +EDROFF_PS = """\ +/S { show } bind def +/P { moveto show } bind def +/M { moveto } bind def +/X { 0 rmoveto } bind def +/Y { 0 exch rmoveto } bind def +/E { findfont + dup maxlength dict begin + { + 1 index /FID ne { def } { pop pop } ifelse + } forall + /Encoding exch def + dup /FontName exch def + currentdict end definefont pop +} bind def +/F { findfont exch scalefont dup setfont + [ exch /setfont cvx ] cvx bind def +} bind def +""" + +# +# VDI.PS -- Postscript driver for VDI meta commands +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. 
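# (annotation, not part of the vendored file) a minimal usage sketch for the
# PSDraw class defined above; the file names are hypothetical:
#
#     from PIL import Image, PSDraw
#
#     im = Image.open("photo.jpg")              # any PIL-readable image
#     with open("out.ps", "wb") as fp:
#         ps = PSDraw.PSDraw(fp)
#         ps.begin_document()
#         ps.setfont("Helvetica", 12)           # must precede text()
#         ps.text((72, 720), "hello from PSDraw")
#         ps.image((72, 72, 540, 684), im, dpi=100)  # centred in the box
#         ps.end_document()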
+# + +VDI_PS = """\ +/Vm { moveto } bind def +/Va { newpath arcn stroke } bind def +/Vl { moveto lineto stroke } bind def +/Vc { newpath 0 360 arc closepath } bind def +/Vr { exch dup 0 rlineto + exch dup neg 0 exch rlineto + exch neg 0 rlineto + 0 exch rlineto + 100 div setgray fill 0 setgray } bind def +/Tm matrix def +/Ve { Tm currentmatrix pop + translate scale newpath 0 0 .5 0 360 arc closepath + Tm setmatrix +} bind def +/Vf { currentgray exch setgray fill setgray } bind def +""" + +# +# ERROR.PS -- Error handler +# +# History: +# 89-11-21 fl: created (pslist 1.10) +# + +ERROR_PS = """\ +/landscape false def +/errorBUF 200 string def +/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def +errordict begin /handleerror { + initmatrix /Courier findfont 10 scalefont setfont + newpath 72 720 moveto $error begin /newerror false def + (PostScript Error) show errorNL errorNL + (Error: ) show + /errorname load errorBUF cvs show errorNL errorNL + (Command: ) show + /command load dup type /stringtype ne { errorBUF cvs } if show + errorNL errorNL + (VMstatus: ) show + vmstatus errorBUF cvs show ( bytes available, ) show + errorBUF cvs show ( bytes used at level ) show + errorBUF cvs show errorNL errorNL + (Operand stargck: ) show errorNL /ostargck load { + dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL + } forall errorNL + (Execution stargck: ) show errorNL /estargck load { + dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL + } forall + end showpage +} def end +""" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bf0c2ec0761c67e647eb998f7e0f8d55fdab581 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PSDraw.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.py new file mode 100644 index 0000000000000000000000000000000000000000..9ed69d687dc4ac31c272f66cff774c566752b270 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.py @@ -0,0 +1,55 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read simple, teragon-style palette files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from ._binary import o8 + + +## +# File handler for Teragon-style palette files. 
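# (annotation, not part of the vendored file) each non-comment line of such a
# palette file is expected to be "index R G B" or "index V" (greyscale), with
# lines starting with '#' skipped, e.g.:
#
#     # a hypothetical Teragon-style palette
#     0 0 0 0
#     1 255 0 0
#     255 255
#
# Entries overwrite the default greyscale ramp built in __init__ below; note
# that the final b"".join only succeeds if every index 0-255 has been replaced
# with a byte string, since leftover (i, i, i) tuples cannot be joined.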
+ +class PaletteFile(object): + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [(i, i, i) for i in range(256)] + + while True: + + s = fp.readline() + + if not s: + break + if s[0:1] == b"#": + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = [int(x) for x in s.split()] + try: + [i, r, g, b] = v + except ValueError: + [i, r] = v + g = b = r + + if 0 <= i <= 255: + self.palette[i] = o8(r) + o8(g) + o8(b) + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51205b7e1132d74f5d7184fb443eecd307793551 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PaletteFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7b16579c5e09259fe95433492cd01f764a96b6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.py @@ -0,0 +1,236 @@ +# +# The Python Imaging Library. +# $Id$ +# + +## +# Image plugin for Palm pixmap images (output only). +## + +from . import Image, ImageFile +from ._binary import o8, o16be as o16b + +__version__ = "1.0" + +_Palm8BitColormapValues = ( + (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), + (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), + (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), + (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153), + (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255), + (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255), + (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204), + (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153), + (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153), + (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255), + (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204), + (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204), + (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153), + (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255), + (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255), + (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204), + (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153), + (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153), + (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255), + (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204), + (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204), + (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153), + (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255), + (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255), + (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204), + (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153), + (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153), + (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102), + (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51), + (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51), + 
(255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0), + (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102), + (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102), + (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51), + (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0), + (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0), + (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102), + (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51), + (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51), + (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0), + (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102), + (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102), + (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51), + (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0), + (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0), + (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102), + (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51), + (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51), + (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0), + (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102), + (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102), + (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51), + (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0), + (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17), + (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119), + (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221), + (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128), + (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) + + +# so build a prototype image to be used for palette resampling +def build_prototype_image(): + image = Image.new("L", (1, len(_Palm8BitColormapValues))) + image.putdata(list(range(len(_Palm8BitColormapValues)))) + palettedata = () + for colormapValue in _Palm8BitColormapValues: + palettedata += colormapValue + palettedata += (0, 0, 0)*(256 - len(_Palm8BitColormapValues)) + image.putpalette(palettedata) + return image + + +Palm8BitColormapImage = build_prototype_image() + +# OK, we now have in Palm8BitColormapImage, +# a "P"-mode image with the right palette +# +# -------------------------------------------------------------------- + +_FLAGS = { + "custom-colormap": 0x4000, + "is-compressed": 0x8000, + "has-transparent": 0x2000, + } + +_COMPRESSION_TYPES = { + "none": 0xFF, + "rle": 0x01, + "scanline": 0x00, + } + + +# +# -------------------------------------------------------------------- + +## +# (Internal) Image save plugin for the Palm format. 
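# (annotation, not part of the vendored file) a minimal usage sketch for the
# writer below -- the file names are hypothetical. For "L" images the caller
# chooses the output depth with the "bpp" save keyword, which arrives here via
# im.encoderinfo:
#
#     from PIL import Image
#
#     im = Image.open("photo.png").convert("L")
#     im.save("out.palm", format="Palm", bpp=4)   # 4-bit, inverted greyscale
#
# "1" and "P" images can be saved without extra keywords; any other mode makes
# _save raise IOError.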
+ +def _save(im, fp, filename): + + if im.mode == "P": + + # we assume this is a color Palm image with the standard colormap, + # unless the "info" dict has a "custom-colormap" field + + rawmode = "P" + bpp = 8 + version = 1 + + elif (im.mode == "L" and + "bpp" in im.encoderinfo and + im.encoderinfo["bpp"] in (1, 2, 4)): + + # this is 8-bit grayscale, so we shift it to get the high-order bits, + # and invert it because + # Palm does greyscale from white (0) to black (1) + bpp = im.encoderinfo["bpp"] + im = im.point( + lambda x, shift=8-bpp, maxval=(1 << bpp)-1: maxval - (x >> shift)) + # we ignore the palette here + im.mode = "P" + rawmode = "P;" + str(bpp) + version = 1 + + elif im.mode == "L" and "bpp" in im.info and im.info["bpp"] in (1, 2, 4): + + # here we assume that even though the inherent mode is 8-bit grayscale, + # only the lower bpp bits are significant. + # We invert them to match the Palm. + bpp = im.info["bpp"] + im = im.point(lambda x, maxval=(1 << bpp)-1: maxval - (x & maxval)) + # we ignore the palette here + im.mode = "P" + rawmode = "P;" + str(bpp) + version = 1 + + elif im.mode == "1": + + # monochrome -- write it inverted, as is the Palm standard + rawmode = "1;I" + bpp = 1 + version = 0 + + else: + + raise IOError("cannot write mode %s as Palm" % im.mode) + + # + # make sure image data is available + im.load() + + # write header + + cols = im.size[0] + rows = im.size[1] + + rowbytes = int((cols + (16//bpp - 1)) / (16 // bpp)) * 2 + transparent_index = 0 + compression_type = _COMPRESSION_TYPES["none"] + + flags = 0 + if im.mode == "P" and "custom-colormap" in im.info: + flags = flags & _FLAGS["custom-colormap"] + colormapsize = 4 * 256 + 2 + colormapmode = im.palette.mode + colormap = im.getdata().getpalette() + else: + colormapsize = 0 + + if "offset" in im.info: + offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4 + else: + offset = 0 + + fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags)) + fp.write(o8(bpp)) + fp.write(o8(version)) + fp.write(o16b(offset)) + fp.write(o8(transparent_index)) + fp.write(o8(compression_type)) + fp.write(o16b(0)) # reserved by Palm + + # now write colormap if necessary + + if colormapsize > 0: + fp.write(o16b(256)) + for i in range(256): + fp.write(o8(i)) + if colormapmode == 'RGB': + fp.write( + o8(colormap[3 * i]) + + o8(colormap[3 * i + 1]) + + o8(colormap[3 * i + 2])) + elif colormapmode == 'RGBA': + fp.write( + o8(colormap[4 * i]) + + o8(colormap[4 * i + 1]) + + o8(colormap[4 * i + 2])) + + # now convert data to raw form + ImageFile._save( + im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, rowbytes, 1))]) + + if hasattr(fp, "flush"): + fp.flush() + + +# +# -------------------------------------------------------------------- + +Image.register_save("Palm", _save) + +Image.register_extension("Palm", ".palm") + +Image.register_mime("Palm", "image/palm") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3730ee7d3ebd8147c0f0945fb8e7da38ce04d837 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PalmImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..fa95b5008f6f7dbd3387564159ae618a0c61cd79 --- 
/dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.py @@ -0,0 +1,66 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCD file handling +# +# History: +# 96-05-10 fl Created +# 96-05-27 fl Added draft mode (128x192, 256x384) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile +from ._binary import i8 + +__version__ = "0.1" + + +## +# Image plugin for PhotoCD images. This plugin only reads the 768x512 +# image from the file; higher resolutions are encoded in a proprietary +# encoding. + +class PcdImageFile(ImageFile.ImageFile): + + format = "PCD" + format_description = "Kodak PhotoCD" + + def _open(self): + + # rough + self.fp.seek(2048) + s = self.fp.read(2048) + + if s[:4] != b"PCD_": + raise SyntaxError("not a PCD file") + + orientation = i8(s[1538]) & 3 + self.tile_post_rotate = None + if orientation == 1: + self.tile_post_rotate = 90 + elif orientation == 3: + self.tile_post_rotate = -90 + + self.mode = "RGB" + self.size = 768, 512 # FIXME: not correct for rotated images! + self.tile = [("pcd", (0, 0)+self.size, 96*2048, None)] + + def load_end(self): + if self.tile_post_rotate: + # Handle rotated PCDs + self.im = self.im.rotate(self.tile_post_rotate) + self.size = self.im.size + + +# +# registry + +Image.register_open(PcdImageFile.format, PcdImageFile) + +Image.register_extension(PcdImageFile.format, ".pcd") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2851f1d40f9bd2e538483d3689cbd23ebf8fee0d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcdImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..eba85feb0fb226291b1b08007cf85ee58cc76450 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.py @@ -0,0 +1,245 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library +# $Id$ +# +# portable compiled font file parser +# +# history: +# 1997-08-19 fl created +# 2003-09-13 fl fixed loading of unicode fonts +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, FontFile +from ._binary import i8, i16le as l16, i32le as l32, i16be as b16, i32be as b32 + +# -------------------------------------------------------------------- +# declarations + +PCF_MAGIC = 0x70636601 # "\x01fcp" + +PCF_PROPERTIES = (1 << 0) +PCF_ACCELERATORS = (1 << 1) +PCF_METRICS = (1 << 2) +PCF_BITMAPS = (1 << 3) +PCF_INK_METRICS = (1 << 4) +PCF_BDF_ENCODINGS = (1 << 5) +PCF_SWIDTHS = (1 << 6) +PCF_GLYPH_NAMES = (1 << 7) +PCF_BDF_ACCELERATORS = (1 << 8) + +BYTES_PER_ROW = [ + lambda bits: ((bits+7) >> 3), + lambda bits: ((bits+15) >> 3) & ~1, + lambda bits: ((bits+31) >> 3) & ~3, + lambda bits: ((bits+63) >> 3) & ~7, +] + + +def sz(s, o): + return s[o:s.index(b"\0", o)] + + +## +# Font file plugin for the X11 PCF format. 
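# (annotation, not part of the vendored file) the BYTES_PER_ROW lambdas above
# turn a row width in bits into a row length in bytes for the four PCF padding
# modes (pad to 1, 2, 4 or 8 bytes); _load_bitmaps below selects one of them
# with "format & 3". A quick sanity check of the 4-byte entry:
#
#     >>> pad4 = lambda bits: ((bits + 31) >> 3) & ~3
#     >>> [pad4(bits) for bits in (1, 8, 9, 32, 33)]
#     [4, 4, 4, 4, 8]
#
# i.e. every row is rounded up to a whole multiple of four bytes, matching
# BYTES_PER_ROW[2].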
+ +class PcfFontFile(FontFile.FontFile): + + name = "name" + + def __init__(self, fp): + + magic = l32(fp.read(4)) + if magic != PCF_MAGIC: + raise SyntaxError("not a PCF file") + + FontFile.FontFile.__init__(self) + + count = l32(fp.read(4)) + self.toc = {} + for i in range(count): + type = l32(fp.read(4)) + self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4)) + + self.fp = fp + + self.info = self._load_properties() + + metrics = self._load_metrics() + bitmaps = self._load_bitmaps(metrics) + encoding = self._load_encoding() + + # + # create glyph structure + + for ch in range(256): + ix = encoding[ch] + if ix is not None: + x, y, l, r, w, a, d, f = metrics[ix] + glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix] + self.glyph[ch] = glyph + + def _getformat(self, tag): + + format, size, offset = self.toc[tag] + + fp = self.fp + fp.seek(offset) + + format = l32(fp.read(4)) + + if format & 4: + i16, i32 = b16, b32 + else: + i16, i32 = l16, l32 + + return fp, format, i16, i32 + + def _load_properties(self): + + # + # font properties + + properties = {} + + fp, format, i16, i32 = self._getformat(PCF_PROPERTIES) + + nprops = i32(fp.read(4)) + + # read property description + p = [] + for i in range(nprops): + p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4)))) + if nprops & 3: + fp.seek(4 - (nprops & 3), 1) # pad + + data = fp.read(i32(fp.read(4))) + + for k, s, v in p: + k = sz(data, k) + if s: + v = sz(data, v) + properties[k] = v + + return properties + + def _load_metrics(self): + + # + # font metrics + + metrics = [] + + fp, format, i16, i32 = self._getformat(PCF_METRICS) + + append = metrics.append + + if (format & 0xff00) == 0x100: + + # "compressed" metrics + for i in range(i16(fp.read(2))): + left = i8(fp.read(1)) - 128 + right = i8(fp.read(1)) - 128 + width = i8(fp.read(1)) - 128 + ascent = i8(fp.read(1)) - 128 + descent = i8(fp.read(1)) - 128 + xsize = right - left + ysize = ascent + descent + append( + (xsize, ysize, left, right, width, + ascent, descent, 0) + ) + + else: + + # "jumbo" metrics + for i in range(i32(fp.read(4))): + left = i16(fp.read(2)) + right = i16(fp.read(2)) + width = i16(fp.read(2)) + ascent = i16(fp.read(2)) + descent = i16(fp.read(2)) + attributes = i16(fp.read(2)) + xsize = right - left + ysize = ascent + descent + append( + (xsize, ysize, left, right, width, + ascent, descent, attributes) + ) + + return metrics + + def _load_bitmaps(self, metrics): + + # + # bitmap data + + bitmaps = [] + + fp, format, i16, i32 = self._getformat(PCF_BITMAPS) + + nbitmaps = i32(fp.read(4)) + + if nbitmaps != len(metrics): + raise IOError("Wrong number of bitmaps") + + offsets = [] + for i in range(nbitmaps): + offsets.append(i32(fp.read(4))) + + bitmapSizes = [] + for i in range(4): + bitmapSizes.append(i32(fp.read(4))) + + # byteorder = format & 4 # non-zero => MSB + bitorder = format & 8 # non-zero => MSB + padindex = format & 3 + + bitmapsize = bitmapSizes[padindex] + offsets.append(bitmapsize) + + data = fp.read(bitmapsize) + + pad = BYTES_PER_ROW[padindex] + mode = "1;R" + if bitorder: + mode = "1" + + for i in range(nbitmaps): + x, y, l, r, w, a, d, f = metrics[i] + b, e = offsets[i], offsets[i+1] + bitmaps.append( + Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x)) + ) + + return bitmaps + + def _load_encoding(self): + + # map character code to bitmap index + encoding = [None] * 256 + + fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS) + + firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2)) + firstRow, lastRow = 
i16(fp.read(2)), i16(fp.read(2)) + + default = i16(fp.read(2)) + + nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1) + + for i in range(nencoding): + encodingOffset = i16(fp.read(2)) + if encodingOffset != 0xFFFF: + try: + encoding[i+firstCol] = encodingOffset + except IndexError: + break # only load ISO-8859-1 glyphs + + return encoding diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..150d185b19d7a4afed92c235c25165a92145159b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcfFontFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..564713a981f5629b0f7423757f463dace40df11d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.py @@ -0,0 +1,179 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCX file handling +# +# This format was originally used by ZSoft's popular PaintBrush +# program for the IBM PC. It is also supported by many MS-DOS and +# Windows applications, including the Windows PaintBrush program in +# Windows 3. +# +# history: +# 1995-09-01 fl Created +# 1996-05-20 fl Fixed RGB support +# 1997-01-03 fl Fixed 2-bit and 4-bit support +# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1) +# 1999-02-07 fl Added write support +# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust +# 2002-07-30 fl Seek from to current position, not beginning of file +# 2003-06-03 fl Extract DPI settings (info["dpi"]) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import logging +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, o8, o16le as o16 + +logger = logging.getLogger(__name__) + +__version__ = "0.6" + + +def _accept(prefix): + return i8(prefix[0]) == 10 and i8(prefix[1]) in [0, 2, 3, 5] + + +## +# Image plugin for Paintbrush images. + +class PcxImageFile(ImageFile.ImageFile): + + format = "PCX" + format_description = "Paintbrush" + + def _open(self): + + # header + s = self.fp.read(128) + if not _accept(s): + raise SyntaxError("not a PCX file") + + # image + bbox = i16(s, 4), i16(s, 6), i16(s, 8)+1, i16(s, 10)+1 + if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: + raise SyntaxError("bad PCX image size") + logger.debug("BBox: %s %s %s %s", *bbox) + + # format + version = i8(s[1]) + bits = i8(s[3]) + planes = i8(s[65]) + stride = i16(s, 66) + logger.debug("PCX version %s, bits %s, planes %s, stride %s", + version, bits, planes, stride) + + self.info["dpi"] = i16(s, 12), i16(s, 14) + + if bits == 1 and planes == 1: + mode = rawmode = "1" + + elif bits == 1 and planes in (2, 4): + mode = "P" + rawmode = "P;%dL" % planes + self.palette = ImagePalette.raw("RGB", s[16:64]) + + elif version == 5 and bits == 8 and planes == 1: + mode = rawmode = "L" + # FIXME: hey, this doesn't work with the incremental loader !!! 
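            # (annotation, not part of the vendored file) 8-bit PCX files keep
            # their 256-colour palette as a 769-byte trailer at the very end of
            # the file: one marker byte (value 12) followed by 768 bytes of
            # packed RGB triples. That is why the code below seeks 769 bytes
            # back from the end of the file, and also why this path cannot work
            # when the data is fed to the plugin incrementally (the FIXME
            # above).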
+ self.fp.seek(-769, 2) + s = self.fp.read(769) + if len(s) == 769 and i8(s[0]) == 12: + # check if the palette is linear greyscale + for i in range(256): + if s[i*3+1:i*3+4] != o8(i)*3: + mode = rawmode = "P" + break + if mode == "P": + self.palette = ImagePalette.raw("RGB", s[1:]) + self.fp.seek(128) + + elif version == 5 and bits == 8 and planes == 3: + mode = "RGB" + rawmode = "RGB;L" + + else: + raise IOError("unknown PCX mode") + + self.mode = mode + self.size = bbox[2]-bbox[0], bbox[3]-bbox[1] + + bbox = (0, 0) + self.size + logger.debug("size: %sx%s", *self.size) + + self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] + +# -------------------------------------------------------------------- +# save PCX files + + +SAVE = { + # mode: (version, bits, planes, raw mode) + "1": (2, 1, 1, "1"), + "L": (5, 8, 1, "L"), + "P": (5, 8, 1, "P"), + "RGB": (5, 8, 3, "RGB;L"), +} + + +def _save(im, fp, filename): + + try: + version, bits, planes, rawmode = SAVE[im.mode] + except KeyError: + raise ValueError("Cannot save %s images as PCX" % im.mode) + + # bytes per plane + stride = (im.size[0] * bits + 7) // 8 + # stride should be even + stride += stride % 2 + # Stride needs to be kept in sync with the PcxEncode.c version. + # Ideally it should be passed in in the state, but the bytes value + # gets overwritten. + + logger.debug("PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", + im.size[0], bits, stride) + + # under windows, we could determine the current screen size with + # "Image.core.display_mode()[1]", but I think that's overkill... + + screen = im.size + + dpi = 100, 100 + + # PCX header + fp.write( + o8(10) + o8(version) + o8(1) + o8(bits) + o16(0) + + o16(0) + o16(im.size[0]-1) + o16(im.size[1]-1) + o16(dpi[0]) + + o16(dpi[1]) + b"\0"*24 + b"\xFF"*24 + b"\0" + o8(planes) + + o16(stride) + o16(1) + o16(screen[0]) + o16(screen[1]) + + b"\0"*54 + ) + + assert fp.tell() == 128 + + ImageFile._save(im, fp, [("pcx", (0, 0)+im.size, 0, + (rawmode, bits*planes))]) + + if im.mode == "P": + # colour palette + fp.write(o8(12)) + fp.write(im.im.getpalette("RGB", "RGB")) # 768 bytes + elif im.mode == "L": + # greyscale palette + fp.write(o8(12)) + for i in range(256): + fp.write(o8(i)*3) + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PcxImageFile.format, PcxImageFile, _accept) +Image.register_save(PcxImageFile.format, _save) + +Image.register_extension(PcxImageFile.format, ".pcx") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fd4be78fc33edcd3d1f2ec8c3120ce4d84a6736 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PcxImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..8538bcd49152db3855a5fd2c56db9ed40348b990 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.py @@ -0,0 +1,227 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PDF (Acrobat) file handling +# +# History: +# 1996-07-16 fl Created +# 1997-01-18 fl Fixed header +# 2004-02-21 fl Fixes for 1/L/CMYK images, etc. +# 2004-02-24 fl Fixes for 1 and P images. 
+# +# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996-1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## +# Image plugin for PDF images (output only). +## + +from . import Image, ImageFile, ImageSequence, PdfParser +import io + +__version__ = "0.5" + + +# +# -------------------------------------------------------------------- + +# object ids: +# 1. catalogue +# 2. pages +# 3. image +# 4. page +# 5. page contents + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +## +# (Internal) Image save plugin for the PDF format. + +def _save(im, fp, filename, save_all=False): + resolution = im.encoderinfo.get("resolution", 72.0) + is_appending = im.encoderinfo.get("append", False) + title = im.encoderinfo.get("title", None) + author = im.encoderinfo.get("author", None) + subject = im.encoderinfo.get("subject", None) + keywords = im.encoderinfo.get("keywords", None) + creator = im.encoderinfo.get("creator", None) + producer = im.encoderinfo.get("producer", None) + + if is_appending: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b") + else: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b") + + if title: + existing_pdf.info.Title = title + if author: + existing_pdf.info.Author = author + if subject: + existing_pdf.info.Subject = subject + if keywords: + existing_pdf.info.Keywords = keywords + if creator: + existing_pdf.info.Creator = creator + if producer: + existing_pdf.info.Producer = producer + + # + # make sure image data is available + im.load() + + existing_pdf.start_writing() + existing_pdf.write_header() + existing_pdf.write_comment("created by PIL PDF driver " + __version__) + + # + # pages + ims = [im] + if save_all: + append_images = im.encoderinfo.get("append_images", []) + for append_im in append_images: + append_im.encoderinfo = im.encoderinfo.copy() + ims.append(append_im) + numberOfPages = 0 + image_refs = [] + page_refs = [] + contents_refs = [] + for im in ims: + im_numberOfPages = 1 + if save_all: + try: + im_numberOfPages = im.n_frames + except AttributeError: + # Image format does not have n_frames. It is a single frame image + pass + numberOfPages += im_numberOfPages + for i in range(im_numberOfPages): + image_refs.append(existing_pdf.next_object_id(0)) + page_refs.append(existing_pdf.next_object_id(0)) + contents_refs.append(existing_pdf.next_object_id(0)) + existing_pdf.pages.append(page_refs[-1]) + + # + # catalog and list of pages + existing_pdf.write_catalog() + + pageNumber = 0 + for imSequence in ims: + im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence] + for im in im_pages: + # FIXME: Should replace ASCIIHexDecode with RunLengthDecode (packbits) + # or LZWDecode (tiff/lzw compression). Note that PDF 1.2 also supports + # Flatedecode (zip compression). 
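            # (annotation, not part of the vendored file) the branches below
            # choose a PDF stream filter and colour space per source mode:
            # "1" becomes hex-encoded 1-bit DeviceGray, "P" is hex-encoded
            # with an Indexed DeviceRGB palette, and "L", "RGB" and "CMYK"
            # are embedded as JPEG streams via DCTDecode; any other mode is
            # rejected with ValueError.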
+ + bits = 8 + params = None + + if im.mode == "1": + filter = "ASCIIHexDecode" + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + bits = 1 + elif im.mode == "L": + filter = "DCTDecode" + # params = "<< /Predictor 15 /Columns %d >>" % (width-2) + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + elif im.mode == "P": + filter = "ASCIIHexDecode" + palette = im.im.getpalette("RGB") + colorspace = [PdfParser.PdfName("Indexed"), PdfParser.PdfName("DeviceRGB"), 255, PdfParser.PdfBinary(palette)] + procset = "ImageI" # indexed color + elif im.mode == "RGB": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceRGB") + procset = "ImageC" # color images + elif im.mode == "CMYK": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceCMYK") + procset = "ImageC" # color images + else: + raise ValueError("cannot save mode %s" % im.mode) + + # + # image + + op = io.BytesIO() + + if filter == "ASCIIHexDecode": + if bits == 1: + # FIXME: the hex encoder doesn't support packed 1-bit + # images; do things the hard way... + data = im.tobytes("raw", "1") + im = Image.new("L", (len(data), 1), None) + im.putdata(data) + ImageFile._save(im, op, [("hex", (0, 0)+im.size, 0, im.mode)]) + elif filter == "DCTDecode": + Image.SAVE["JPEG"](im, op, filename) + elif filter == "FlateDecode": + ImageFile._save(im, op, [("zip", (0, 0)+im.size, 0, im.mode)]) + elif filter == "RunLengthDecode": + ImageFile._save(im, op, [("packbits", (0, 0)+im.size, 0, im.mode)]) + else: + raise ValueError("unsupported PDF filter (%s)" % filter) + + # + # Get image characteristics + + width, height = im.size + + existing_pdf.write_obj(image_refs[pageNumber], stream=op.getvalue(), + Type=PdfParser.PdfName("XObject"), + Subtype=PdfParser.PdfName("Image"), + Width=width, # * 72.0 / resolution, + Height=height, # * 72.0 / resolution, + Filter=PdfParser.PdfName(filter), + BitsPerComponent=bits, + DecodeParams=params, + ColorSpace=colorspace) + + # + # page + + existing_pdf.write_page(page_refs[pageNumber], + Resources=PdfParser.PdfDict( + ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)], + XObject=PdfParser.PdfDict(image=image_refs[pageNumber])), + MediaBox=[0, 0, int(width * 72.0 / resolution), int(height * 72.0 / resolution)], + Contents=contents_refs[pageNumber] + ) + + # + # page contents + + page_contents = PdfParser.make_bytes( + "q %d 0 0 %d 0 0 cm /image Do Q\n" % ( + int(width * 72.0 / resolution), + int(height * 72.0 / resolution))) + + existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents) + + pageNumber += 1 + + # + # trailer + existing_pdf.write_xref_and_trailer() + if hasattr(fp, "flush"): + fp.flush() + existing_pdf.close() + +# +# -------------------------------------------------------------------- + + +Image.register_save("PDF", _save) +Image.register_save_all("PDF", _save_all) + +Image.register_extension("PDF", ".pdf") + +Image.register_mime("PDF", "application/pdf") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b30a3920f77b529d19e519ae26f929195f7a6b80 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.py new file mode 
100644 index 0000000000000000000000000000000000000000..c0635ef3104664f1bfe643078e41140ea8a94ad6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.py @@ -0,0 +1,844 @@ +import codecs +import collections +import mmap +import os +import re +import zlib +from ._util import py3 + +try: + from UserDict import UserDict # Python 2.x +except ImportError: + UserDict = collections.UserDict # Python 3.x + + +if py3: # Python 3.x + def make_bytes(s): + return s.encode("us-ascii") +else: # Python 2.x + def make_bytes(s): # pragma: no cover + return s # pragma: no cover + + +# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set on page 656 +def encode_text(s): + return codecs.BOM_UTF16_BE + s.encode("utf_16_be") + + +PDFDocEncoding = { + 0x16: u"\u0017", + 0x18: u"\u02D8", + 0x19: u"\u02C7", + 0x1A: u"\u02C6", + 0x1B: u"\u02D9", + 0x1C: u"\u02DD", + 0x1D: u"\u02DB", + 0x1E: u"\u02DA", + 0x1F: u"\u02DC", + 0x80: u"\u2022", + 0x81: u"\u2020", + 0x82: u"\u2021", + 0x83: u"\u2026", + 0x84: u"\u2014", + 0x85: u"\u2013", + 0x86: u"\u0192", + 0x87: u"\u2044", + 0x88: u"\u2039", + 0x89: u"\u203A", + 0x8A: u"\u2212", + 0x8B: u"\u2030", + 0x8C: u"\u201E", + 0x8D: u"\u201C", + 0x8E: u"\u201D", + 0x8F: u"\u2018", + 0x90: u"\u2019", + 0x91: u"\u201A", + 0x92: u"\u2122", + 0x93: u"\uFB01", + 0x94: u"\uFB02", + 0x95: u"\u0141", + 0x96: u"\u0152", + 0x97: u"\u0160", + 0x98: u"\u0178", + 0x99: u"\u017D", + 0x9A: u"\u0131", + 0x9B: u"\u0142", + 0x9C: u"\u0153", + 0x9D: u"\u0161", + 0x9E: u"\u017E", + 0xA0: u"\u20AC", +} + + +def decode_text(b): + if b[:len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: + return b[len(codecs.BOM_UTF16_BE):].decode("utf_16_be") + elif py3: # Python 3.x + return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b) + else: # Python 2.x + return u"".join(PDFDocEncoding.get(ord(byte), byte) for byte in b) + + +class PdfFormatError(RuntimeError): + """An error that probably indicates a syntactic or semantic error in the PDF file structure""" + pass + + +def check_format_condition(condition, error_message): + if not condition: + raise PdfFormatError(error_message) + + +class IndirectReference(collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"])): + def __str__(self): + return "%s %s R" % self + + def __bytes__(self): + return self.__str__().encode("us-ascii") + + def __eq__(self, other): + return other.__class__ is self.__class__ and other.object_id == self.object_id and other.generation == self.generation + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.object_id, self.generation)) + + +class IndirectObjectDef(IndirectReference): + def __str__(self): + return "%s %s obj" % self + + +class XrefTable: + def __init__(self): + self.existing_entries = {} # object ID => (offset, generation) + self.new_entries = {} # object ID => (offset, generation) + self.deleted_entries = {0: 65536} # object ID => generation + self.reading_finished = False + + def __setitem__(self, key, value): + if self.reading_finished: + self.new_entries[key] = value + else: + self.existing_entries[key] = value + if key in self.deleted_entries: + del self.deleted_entries[key] + + def __getitem__(self, key): + try: + return self.new_entries[key] + except KeyError: + return self.existing_entries[key] + + def __delitem__(self, key): + if key in self.new_entries: + generation = self.new_entries[key][1] + 1 + del self.new_entries[key] + self.deleted_entries[key] = generation + elif key in 
self.existing_entries: + generation = self.existing_entries[key][1] + 1 + self.deleted_entries[key] = generation + elif key in self.deleted_entries: + generation = self.deleted_entries[key] + else: + raise IndexError("object ID " + str(key) + " cannot be deleted because it doesn't exist") + + def __contains__(self, key): + return key in self.existing_entries or key in self.new_entries + + def __len__(self): + return len(set(self.existing_entries.keys()) | set(self.new_entries.keys()) | set(self.deleted_entries.keys())) + + def keys(self): + return (set(self.existing_entries.keys()) - set(self.deleted_entries.keys())) | set(self.new_entries.keys()) + + def write(self, f): + keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) + deleted_keys = sorted(set(self.deleted_entries.keys())) + startxref = f.tell() + f.write(b"xref\n") + while keys: + # find a contiguous sequence of object IDs + prev = None + for index, key in enumerate(keys): + if prev is None or prev+1 == key: + prev = key + else: + contiguous_keys = keys[:index] + keys = keys[index:] + break + else: + contiguous_keys = keys + keys = None + f.write(make_bytes("%d %d\n" % (contiguous_keys[0], len(contiguous_keys)))) + for object_id in contiguous_keys: + if object_id in self.new_entries: + f.write(make_bytes("%010d %05d n \n" % self.new_entries[object_id])) + else: + this_deleted_object_id = deleted_keys.pop(0) + check_format_condition(object_id == this_deleted_object_id, + "expected the next deleted object " + "ID to be %s, instead found %s" % + (object_id, this_deleted_object_id)) + try: + next_in_linked_list = deleted_keys[0] + except IndexError: + next_in_linked_list = 0 + f.write(make_bytes("%010d %05d f \n" % (next_in_linked_list, self.deleted_entries[object_id]))) + return startxref + + +class PdfName: + def __init__(self, name): + if isinstance(name, PdfName): + self.name = name.name + elif isinstance(name, bytes): + self.name = name + else: + self.name = name.encode("us-ascii") + + def name_as_str(self): + return self.name.decode("us-ascii") + + def __eq__(self, other): + return (isinstance(other, PdfName) and other.name == self.name) or other == self.name + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return "PdfName(%s)" % repr(self.name) + + @classmethod + def from_pdf_stream(cls, data): + return cls(PdfParser.interpret_name(data)) + + allowed_chars = set(range(33, 127)) - set(ord(c) for c in "#%/()<>[]{}") + + def __bytes__(self): + result = bytearray(b"/") + for b in self.name: + if py3: # Python 3.x + if b in self.allowed_chars: + result.append(b) + else: + result.extend(make_bytes("#%02X" % b)) + else: # Python 2.x + if ord(b) in self.allowed_chars: + result.append(b) + else: + result.extend(b"#%02X" % ord(b)) + return bytes(result) + + __str__ = __bytes__ + + +class PdfArray(list): + def __bytes__(self): + return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" + + __str__ = __bytes__ + + +class PdfDict(UserDict): + def __setattr__(self, key, value): + if key == "data": + if hasattr(UserDict, "__setattr__"): + UserDict.__setattr__(self, key, value) + else: + self.__dict__[key] = value + else: + if isinstance(key, str): + key = key.encode("us-ascii") + self[key] = value + + def __getattr__(self, key): + try: + value = self[key] + except KeyError: + try: + value = self[key.encode("us-ascii")] + except KeyError: + raise AttributeError(key) + if isinstance(value, bytes): + return decode_text(value) + else: + return value + + def __bytes__(self): + out = 
bytearray(b"<<") + for key, value in self.items(): + if value is None: + continue + value = pdf_repr(value) + out.extend(b"\n") + out.extend(bytes(PdfName(key))) + out.extend(b" ") + out.extend(value) + out.extend(b"\n>>") + return bytes(out) + + if not py3: + __str__ = __bytes__ + + +class PdfBinary: + def __init__(self, data): + self.data = data + + if py3: # Python 3.x + def __bytes__(self): + return make_bytes("<%s>" % "".join("%02X" % b for b in self.data)) + else: # Python 2.x + def __str__(self): + return "<%s>" % "".join("%02X" % ord(b) for b in self.data) + + +class PdfStream: + def __init__(self, dictionary, buf): + self.dictionary = dictionary + self.buf = buf + + def decode(self): + try: + filter = self.dictionary.Filter + except AttributeError: + return self.buf + if filter == b"FlateDecode": + try: + expected_length = self.dictionary.DL + except AttributeError: + expected_length = self.dictionary.Length + return zlib.decompress(self.buf, bufsize=int(expected_length)) + else: + raise NotImplementedError("stream filter %s unknown/unsupported" % repr(self.dictionary.Filter)) + + +def pdf_repr(x): + if x is True: + return b"true" + elif x is False: + return b"false" + elif x is None: + return b"null" + elif isinstance(x, PdfName) or isinstance(x, PdfDict) or isinstance(x, PdfArray) or isinstance(x, PdfBinary): + return bytes(x) + elif isinstance(x, int): + return str(x).encode("us-ascii") + elif isinstance(x, dict): + return bytes(PdfDict(x)) + elif isinstance(x, list): + return bytes(PdfArray(x)) + elif (py3 and isinstance(x, str)) or (not py3 and isinstance(x, unicode)): + return pdf_repr(encode_text(x)) + elif isinstance(x, bytes): + return b"(" + x.replace(b"\\", b"\\\\").replace(b"(", b"\\(").replace(b")", b"\\)") + b")" # XXX escape more chars? 
handle binary garbage + else: + return bytes(x) + + +class PdfParser: + """Based on https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf + Supports PDF up to 1.4 + """ + + def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"): + # type: (PdfParser, str, file, Union[bytes, bytearray], int, str) -> None + if buf and f: + raise RuntimeError("specify buf or f or filename, but not both buf and f") + self.filename = filename + self.buf = buf + self.f = f + self.start_offset = start_offset + self.should_close_buf = False + self.should_close_file = False + if filename is not None and f is None: + self.f = f = open(filename, mode) + self.should_close_file = True + if f is not None: + self.buf = buf = self.get_buf_from_file(f) + self.should_close_buf = True + if not filename and hasattr(f, "name"): + self.filename = f.name + self.cached_objects = {} + if buf: + self.read_pdf_info() + else: + self.file_size_total = self.file_size_this = 0 + self.root = PdfDict() + self.root_ref = None + self.info = PdfDict() + self.info_ref = None + self.page_tree_root = {} + self.pages = [] + self.orig_pages = [] + self.pages_ref = None + self.last_xref_section_offset = None + self.trailer_dict = {} + self.xref_table = XrefTable() + self.xref_table.reading_finished = True + if f: + self.seek_end() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + return False # do not suppress exceptions + + def start_writing(self): + self.close_buf() + self.seek_end() + + def close_buf(self): + try: + self.buf.close() + except AttributeError: + pass + self.buf = None + + def close(self): + if self.should_close_buf: + self.close_buf() + if self.f is not None and self.should_close_file: + self.f.close() + self.f = None + + def seek_end(self): + self.f.seek(0, os.SEEK_END) + + def write_header(self): + self.f.write(b"%PDF-1.4\n") + + def write_comment(self, s): + self.f.write(("%% %s\n" % (s,)).encode("utf-8")) + + def write_catalog(self): + self.del_root() + self.root_ref = self.next_object_id(self.f.tell()) + self.pages_ref = self.next_object_id(0) + self.rewrite_pages() + self.write_obj(self.root_ref, + Type=PdfName(b"Catalog"), + Pages=self.pages_ref) + self.write_obj(self.pages_ref, + Type=PdfName(b"Pages"), + Count=len(self.pages), + Kids=self.pages) + return self.root_ref + + def rewrite_pages(self): + pages_tree_nodes_to_delete = [] + for i, page_ref in enumerate(self.orig_pages): + page_info = self.cached_objects[page_ref] + del self.xref_table[page_ref.object_id] + pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) + if page_ref not in self.pages: + # the page has been deleted + continue + # make dict keys into strings for passing to write_page + stringified_page_info = {} + for key, value in page_info.items(): + # key should be a PdfName + stringified_page_info[key.name_as_str()] = value + stringified_page_info["Parent"] = self.pages_ref + new_page_ref = self.write_page(None, **stringified_page_info) + for j, cur_page_ref in enumerate(self.pages): + if cur_page_ref == page_ref: + # replace the page reference with the new one + self.pages[j] = new_page_ref + # delete redundant Pages tree nodes from xref table + for pages_tree_node_ref in pages_tree_nodes_to_delete: + while pages_tree_node_ref: + pages_tree_node = self.cached_objects[pages_tree_node_ref] + if pages_tree_node_ref.object_id in self.xref_table: + del self.xref_table[pages_tree_node_ref.object_id] + pages_tree_node_ref = 
pages_tree_node.get(b"Parent", None) + self.orig_pages = [] + + def write_xref_and_trailer(self, new_root_ref=None): + if new_root_ref: + self.del_root() + self.root_ref = new_root_ref + if self.info: + self.info_ref = self.write_obj(None, self.info) + start_xref = self.xref_table.write(self.f) + num_entries = len(self.xref_table) + trailer_dict = {b"Root": self.root_ref, b"Size": num_entries} + if self.last_xref_section_offset is not None: + trailer_dict[b"Prev"] = self.last_xref_section_offset + if self.info: + trailer_dict[b"Info"] = self.info_ref + self.last_xref_section_offset = start_xref + self.f.write(b"trailer\n" + bytes(PdfDict(trailer_dict)) + make_bytes("\nstartxref\n%d\n%%%%EOF" % start_xref)) + + def write_page(self, ref, *objs, **dict_obj): + if isinstance(ref, int): + ref = self.pages[ref] + if "Type" not in dict_obj: + dict_obj["Type"] = PdfName(b"Page") + if "Parent" not in dict_obj: + dict_obj["Parent"] = self.pages_ref + return self.write_obj(ref, *objs, **dict_obj) + + def write_obj(self, ref, *objs, **dict_obj): + f = self.f + if ref is None: + ref = self.next_object_id(f.tell()) + else: + self.xref_table[ref.object_id] = (f.tell(), ref.generation) + f.write(bytes(IndirectObjectDef(*ref))) + stream = dict_obj.pop("stream", None) + if stream is not None: + dict_obj["Length"] = len(stream) + if dict_obj: + f.write(pdf_repr(dict_obj)) + for obj in objs: + f.write(pdf_repr(obj)) + if stream is not None: + f.write(b"stream\n") + f.write(stream) + f.write(b"\nendstream\n") + f.write(b"endobj\n") + return ref + + def del_root(self): + if self.root_ref is None: + return + del self.xref_table[self.root_ref.object_id] + del self.xref_table[self.root[b"Pages"].object_id] + + @staticmethod + def get_buf_from_file(f): + if hasattr(f, "getbuffer"): + return f.getbuffer() + elif hasattr(f, "getvalue"): + return f.getvalue() + else: + try: + return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: # cannot mmap an empty file + return b"" + + def read_pdf_info(self): + self.file_size_total = len(self.buf) + self.file_size_this = self.file_size_total - self.start_offset + self.read_trailer() + self.root_ref = self.trailer_dict[b"Root"] + self.info_ref = self.trailer_dict.get(b"Info", None) + self.root = PdfDict(self.read_indirect(self.root_ref)) + if self.info_ref is None: + self.info = PdfDict() + else: + self.info = PdfDict(self.read_indirect(self.info_ref)) + check_format_condition(b"Type" in self.root, "/Type missing in Root") + check_format_condition(self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog") + check_format_condition(b"Pages" in self.root, "/Pages missing in Root") + check_format_condition(isinstance(self.root[b"Pages"], IndirectReference), "/Pages in Root is not an indirect reference") + self.pages_ref = self.root[b"Pages"] + self.page_tree_root = self.read_indirect(self.pages_ref) + self.pages = self.linearize_page_tree(self.page_tree_root) + # save the original list of page references in case the user modifies, adds or deletes some pages and we need to rewrite the pages and their list + self.orig_pages = self.pages[:] + + def next_object_id(self, offset=None): + try: + # TODO: support reuse of deleted objects + reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) + except ValueError: + reference = IndirectReference(1, 0) + if offset is not None: + self.xref_table[reference.object_id] = (offset, 0) + return reference + + delimiter = br"[][()<>{}/%]" + delimiter_or_ws = br"[][()<>{}/%\000\011\012\014\015\040]" + whitespace 
= br"[\000\011\012\014\015\040]" + whitespace_or_hex = br"[\000\011\012\014\015\0400-9a-fA-F]" + whitespace_optional = whitespace + b"*" + whitespace_mandatory = whitespace + b"+" + newline_only = br"[\r\n]+" + newline = whitespace_optional + newline_only + whitespace_optional + re_trailer_end = re.compile(whitespace_mandatory + br"trailer" + whitespace_optional + br"\<\<(.*\>\>)" + newline + + br"startxref" + newline + br"([0-9]+)" + newline + br"%%EOF" + whitespace_optional + br"$", re.DOTALL) + re_trailer_prev = re.compile(whitespace_optional + br"trailer" + whitespace_optional + br"\<\<(.*?\>\>)" + newline + + br"startxref" + newline + br"([0-9]+)" + newline + br"%%EOF" + whitespace_optional, re.DOTALL) + + def read_trailer(self): + search_start_offset = len(self.buf) - 16384 + if search_start_offset < self.start_offset: + search_start_offset = self.start_offset + m = self.re_trailer_end.search(self.buf, search_start_offset) + check_format_condition(m, "trailer end not found") + # make sure we found the LAST trailer + last_match = m + while m: + last_match = m + m = self.re_trailer_end.search(self.buf, m.start()+16) + if not m: + m = last_match + trailer_data = m.group(1) + self.last_xref_section_offset = int(m.group(2)) + self.trailer_dict = self.interpret_trailer(trailer_data) + self.xref_table = XrefTable() + self.read_xref_table(xref_section_offset=self.last_xref_section_offset) + if b"Prev" in self.trailer_dict: + self.read_prev_trailer(self.trailer_dict[b"Prev"]) + + def read_prev_trailer(self, xref_section_offset): + trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) + m = self.re_trailer_prev.search(self.buf[trailer_offset:trailer_offset+16384]) + check_format_condition(m, "previous trailer not found") + trailer_data = m.group(1) + check_format_condition(int(m.group(2)) == xref_section_offset, "xref section offset in previous trailer doesn't match what was expected") + trailer_dict = self.interpret_trailer(trailer_data) + if b"Prev" in trailer_dict: + self.read_prev_trailer(trailer_dict[b"Prev"]) + + re_whitespace_optional = re.compile(whitespace_optional) + re_name = re.compile(whitespace_optional + br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" + delimiter_or_ws + br")") + re_dict_start = re.compile(whitespace_optional + br"\<\<") + re_dict_end = re.compile(whitespace_optional + br"\>\>" + whitespace_optional) + + @classmethod + def interpret_trailer(cls, trailer_data): + trailer = {} + offset = 0 + while True: + m = cls.re_name.match(trailer_data, offset) + if not m: + m = cls.re_dict_end.match(trailer_data, offset) + check_format_condition(m and m.end() == len(trailer_data), "name not found in trailer, remaining data: " + repr(trailer_data[offset:])) + break + key = cls.interpret_name(m.group(1)) + value, offset = cls.get_value(trailer_data, m.end()) + trailer[key] = value + check_format_condition(b"Size" in trailer and isinstance(trailer[b"Size"], int), "/Size not in trailer or not an integer") + check_format_condition(b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), "/Root not in trailer or not an indirect reference") + return trailer + + re_hashes_in_name = re.compile(br"([^#]*)(#([0-9a-fA-F]{2}))?") + + @classmethod + def interpret_name(cls, raw, as_text=False): + name = b"" + for m in cls.re_hashes_in_name.finditer(raw): + if m.group(3): + name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) + else: + name += m.group(1) + if as_text: + return name.decode("utf-8") + else: + return bytes(name) + + re_null = 
re.compile(whitespace_optional + br"null(?=" + delimiter_or_ws + br")") + re_true = re.compile(whitespace_optional + br"true(?=" + delimiter_or_ws + br")") + re_false = re.compile(whitespace_optional + br"false(?=" + delimiter_or_ws + br")") + re_int = re.compile(whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")") + re_real = re.compile(whitespace_optional + br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" + delimiter_or_ws + br")") + re_array_start = re.compile(whitespace_optional + br"\[") + re_array_end = re.compile(whitespace_optional + br"]") + re_string_hex = re.compile(whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>") + re_string_lit = re.compile(whitespace_optional + br"\(") + re_indirect_reference = re.compile(whitespace_optional + br"([-+]?[0-9]+)" + whitespace_mandatory + br"([-+]?[0-9]+)" + whitespace_mandatory + br"R(?=" + delimiter_or_ws + br")") + re_indirect_def_start = re.compile(whitespace_optional + br"([-+]?[0-9]+)" + whitespace_mandatory + br"([-+]?[0-9]+)" + whitespace_mandatory + br"obj(?=" + delimiter_or_ws + br")") + re_indirect_def_end = re.compile(whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")") + re_comment = re.compile(br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*") + re_stream_start = re.compile(whitespace_optional + br"stream\r?\n") + re_stream_end = re.compile(whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")") + + @classmethod + def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1): + if max_nesting == 0: + return None, None + m = cls.re_comment.match(data, offset) + if m: + offset = m.end() + m = cls.re_indirect_def_start.match(data, offset) + if m: + check_format_condition(int(m.group(1)) > 0, "indirect object definition: object ID must be greater than 0") + check_format_condition(int(m.group(2)) >= 0, "indirect object definition: generation must be non-negative") + check_format_condition(expect_indirect is None or expect_indirect == IndirectReference(int(m.group(1)), int(m.group(2))), + "indirect object definition different than expected") + object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting-1) + if offset is None: + return object, None + m = cls.re_indirect_def_end.match(data, offset) + check_format_condition(m, "indirect object definition end not found") + return object, m.end() + check_format_condition(not expect_indirect, "indirect object definition not found") + m = cls.re_indirect_reference.match(data, offset) + if m: + check_format_condition(int(m.group(1)) > 0, "indirect object reference: object ID must be greater than 0") + check_format_condition(int(m.group(2)) >= 0, "indirect object reference: generation must be non-negative") + return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() + m = cls.re_dict_start.match(data, offset) + if m: + offset = m.end() + result = {} + m = cls.re_dict_end.match(data, offset) + while not m: + key, offset = cls.get_value(data, offset, max_nesting=max_nesting-1) + if offset is None: + return result, None + value, offset = cls.get_value(data, offset, max_nesting=max_nesting-1) + result[key] = value + if offset is None: + return result, None + m = cls.re_dict_end.match(data, offset) + offset = m.end() + m = cls.re_stream_start.match(data, offset) + if m: + try: + stream_len = int(result[b"Length"]) + except (TypeError, KeyError, ValueError): + raise PdfFormatError("bad or missing Length in stream dict (%r)" % result.get(b"Length", None)) + stream_data = data[m.end():m.end() + stream_len] + m = 
cls.re_stream_end.match(data, m.end() + stream_len) + check_format_condition(m, "stream end not found") + offset = m.end() + result = PdfStream(PdfDict(result), stream_data) + else: + result = PdfDict(result) + return result, offset + m = cls.re_array_start.match(data, offset) + if m: + offset = m.end() + result = [] + m = cls.re_array_end.match(data, offset) + while not m: + value, offset = cls.get_value(data, offset, max_nesting=max_nesting-1) + result.append(value) + if offset is None: + return result, None + m = cls.re_array_end.match(data, offset) + return result, m.end() + m = cls.re_null.match(data, offset) + if m: + return None, m.end() + m = cls.re_true.match(data, offset) + if m: + return True, m.end() + m = cls.re_false.match(data, offset) + if m: + return False, m.end() + m = cls.re_name.match(data, offset) + if m: + return PdfName(cls.interpret_name(m.group(1))), m.end() + m = cls.re_int.match(data, offset) + if m: + return int(m.group(1)), m.end() + m = cls.re_real.match(data, offset) + if m: + return float(m.group(1)), m.end() # XXX Decimal instead of float??? + m = cls.re_string_hex.match(data, offset) + if m: + hex_string = bytearray([b for b in m.group(1) if b in b"0123456789abcdefABCDEF"]) # filter out whitespace + if len(hex_string) % 2 == 1: + hex_string.append(ord(b"0")) # append a 0 if the length is not even - yes, at the end + return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() + m = cls.re_string_lit.match(data, offset) + if m: + return cls.get_literal_string(data, m.end()) + #return None, offset # fallback (only for debugging) + raise PdfFormatError("unrecognized object: " + repr(data[offset:offset+32])) + + re_lit_str_token = re.compile(br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))") + escaped_chars = { + b"n": b"\n", + b"r": b"\r", + b"t": b"\t", + b"b": b"\b", + b"f": b"\f", + b"(": b"(", + b")": b")", + b"\\": b"\\", + ord(b"n"): b"\n", + ord(b"r"): b"\r", + ord(b"t"): b"\t", + ord(b"b"): b"\b", + ord(b"f"): b"\f", + ord(b"("): b"(", + ord(b")"): b")", + ord(b"\\"): b"\\", + } + + @classmethod + def get_literal_string(cls, data, offset): + nesting_depth = 0 + result = bytearray() + for m in cls.re_lit_str_token.finditer(data, offset): + result.extend(data[offset:m.start()]) + if m.group(1): + result.extend(cls.escaped_chars[m.group(1)[1]]) + elif m.group(2): + result.append(int(m.group(2)[1:], 8)) + elif m.group(3): + pass + elif m.group(5): + result.extend(b"\n") + elif m.group(6): + result.extend(b"(") + nesting_depth += 1 + elif m.group(7): + if nesting_depth == 0: + return bytes(result), m.end() + result.extend(b")") + nesting_depth -= 1 + offset = m.end() + raise PdfFormatError("unfinished literal string") + + re_xref_section_start = re.compile(whitespace_optional + br"xref" + newline) + re_xref_subsection_start = re.compile(whitespace_optional + br"([0-9]+)" + whitespace_mandatory + br"([0-9]+)" + whitespace_optional + newline_only) + re_xref_entry = re.compile(br"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)") + + def read_xref_table(self, xref_section_offset): + subsection_found = False + m = self.re_xref_section_start.match(self.buf, xref_section_offset + self.start_offset) + check_format_condition(m, "xref section start not found") + offset = m.end() + while True: + m = self.re_xref_subsection_start.match(self.buf, offset) + if not m: + check_format_condition(subsection_found, "xref subsection start not found") + break + subsection_found = True + offset = m.end() + first_object = int(m.group(1)) + num_objects = 
int(m.group(2)) + for i in range(first_object, first_object+num_objects): + m = self.re_xref_entry.match(self.buf, offset) + check_format_condition(m, "xref entry not found") + offset = m.end() + is_free = m.group(3) == b"f" + generation = int(m.group(2)) + if not is_free: + new_entry = (int(m.group(1)), generation) + check_format_condition(i not in self.xref_table or self.xref_table[i] == new_entry, "xref entry duplicated (and not identical)") + self.xref_table[i] = new_entry + return offset + + def read_indirect(self, ref, max_nesting=-1): + offset, generation = self.xref_table[ref[0]] + check_format_condition(generation == ref[1], "expected to find generation %s for object ID %s in xref table, instead found generation %s at offset %s" \ + % (ref[1], ref[0], generation, offset)) + value = self.get_value(self.buf, offset + self.start_offset, expect_indirect=IndirectReference(*ref), max_nesting=max_nesting)[0] + self.cached_objects[ref] = value + return value + + def linearize_page_tree(self, node=None): + if node is None: + node = self.page_tree_root + check_format_condition(node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages") + pages = [] + for kid in node[b"Kids"]: + kid_object = self.read_indirect(kid) + if kid_object[b"Type"] == b"Page": + pages.append(kid) + else: + pages.extend(self.linearize_page_tree(node=kid_object)) + return pages diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93357ec7580ee9ef67c030eba4ac3b914391fd00 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PdfParser.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..220577ccec3e1ece6c4be940a69edb681785d448 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.py @@ -0,0 +1,71 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PIXAR raster support for PIL +# +# history: +# 97-01-29 fl Created +# +# notes: +# This is incomplete; it is based on a few samples created with +# Photoshop 2.5 and 3.0, and a summary description provided by +# Greg Coats . Hopefully, "L" and +# "RGBA" support will be added in future versions. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile +from ._binary import i16le as i16 + +__version__ = "0.1" + + +# +# helpers + +def _accept(prefix): + return prefix[:4] == b"\200\350\000\000" + + +## +# Image plugin for PIXAR raster images. + +class PixarImageFile(ImageFile.ImageFile): + + format = "PIXAR" + format_description = "PIXAR raster image" + + def _open(self): + + # assuming a 4-byte magic label + s = self.fp.read(4) + if s != b"\200\350\000\000": + raise SyntaxError("not a PIXAR file") + + # read rest of header + s = s + self.fp.read(508) + + self.size = i16(s[418:420]), i16(s[416:418]) + + # get channel/depth descriptions + mode = i16(s[424:426]), i16(s[426:428]) + + if mode == (14, 2): + self.mode = "RGB" + # FIXME: to be continued... 
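# --- Illustrative aside (not part of the vendored file) -----------------
# The tile descriptor created just below follows the convention used by the
# raster plugins throughout this tree:
#     (decoder_name, (x0, y0, x1, y1), data_offset, decoder_args)
# and for the built-in "raw" decoder the args are (rawmode, stride, orientation).
# A hypothetical 16x16 greyscale image stored contiguously at byte offset 1024
# would therefore be described as:
#     self.tile = [("raw", (0, 0, 16, 16), 1024, ("L", 0, 1))]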
+ + # create tile descriptor (assuming "dumped") + self.tile = [("raw", (0, 0)+self.size, 1024, (self.mode, 0, 1))] + + +# +# -------------------------------------------------------------------- + +Image.register_open(PixarImageFile.format, PixarImageFile, _accept) + +Image.register_extension(PixarImageFile.format, ".pxr") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45f738efc5949fc980992a6b27ffd609ba48532f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PixarImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..82606199074c9509a0974e54b93460a7c99eab1f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.py @@ -0,0 +1,870 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PNG support code +# +# See "PNG (Portable Network Graphics) Specification, version 1.0; +# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.). +# +# history: +# 1996-05-06 fl Created (couldn't resist it) +# 1996-12-14 fl Upgraded, added read and verify support (0.2) +# 1996-12-15 fl Separate PNG stream parser +# 1996-12-29 fl Added write support, added getchunks +# 1996-12-30 fl Eliminated circular references in decoder (0.3) +# 1998-07-12 fl Read/write 16-bit images as mode I (0.4) +# 2001-02-08 fl Added transparency support (from Zircon) (0.5) +# 2001-04-16 fl Don't close data source in "open" method (0.6) +# 2004-02-24 fl Don't even pretend to support interlaced files (0.7) +# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8) +# 2004-09-20 fl Added PngInfo chunk container +# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev) +# 2008-08-13 fl Added tRNS support for RGB images +# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech) +# 2009-03-08 fl Added zTXT support (from Lowell Alleman) +# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua) +# +# Copyright (c) 1997-2009 by Secret Labs AB +# Copyright (c) 1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import logging +import re +import zlib +import struct + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16be as i16, i32be as i32, o16be as o16, o32be as o32 +from ._util import py3 + +__version__ = "0.9" + +logger = logging.getLogger(__name__) + +is_cid = re.compile(br"\w\w\w\w").match + + +_MAGIC = b"\211PNG\r\n\032\n" + + +_MODES = { + # supported bits/color combinations, and corresponding modes/rawmodes + (1, 0): ("1", "1"), + (2, 0): ("L", "L;2"), + (4, 0): ("L", "L;4"), + (8, 0): ("L", "L"), + (16, 0): ("I", "I;16B"), + (8, 2): ("RGB", "RGB"), + (16, 2): ("RGB", "RGB;16B"), + (1, 3): ("P", "P;1"), + (2, 3): ("P", "P;2"), + (4, 3): ("P", "P;4"), + (8, 3): ("P", "P"), + (8, 4): ("LA", "LA"), + (16, 4): ("RGBA", "LA;16B"), # LA;16B->LA not yet available + (8, 6): ("RGBA", "RGBA"), + (16, 6): ("RGBA", "RGBA;16B"), +} + + +_simple_palette = re.compile(b'^\xff*\x00\xff*$') + +# Maximum decompressed size for a iTXt or zTXt chunk. 
+# Eliminates decompression bombs where compressed chunks can expand 1000x +MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK +# Set the maximum total text chunk size. +MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK + + +def _safe_zlib_decompress(s): + dobj = zlib.decompressobj() + plaintext = dobj.decompress(s, MAX_TEXT_CHUNK) + if dobj.unconsumed_tail: + raise ValueError("Decompressed Data Too Large") + return plaintext + + +def _crc32(data, seed=0): + return zlib.crc32(data, seed) & 0xffffffff + + +# -------------------------------------------------------------------- +# Support classes. Suitable for PNG and related formats like MNG etc. + +class ChunkStream(object): + + def __init__(self, fp): + + self.fp = fp + self.queue = [] + + def read(self): + "Fetch a new chunk. Returns header information." + cid = None + + if self.queue: + cid, pos, length = self.queue.pop() + self.fp.seek(pos) + else: + s = self.fp.read(8) + cid = s[4:] + pos = self.fp.tell() + length = i32(s) + + if not is_cid(cid): + if not ImageFile.LOAD_TRUNCATED_IMAGES: + raise SyntaxError("broken PNG file (chunk %s)" % repr(cid)) + + return cid, pos, length + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self.queue = self.crc = self.fp = None + + def push(self, cid, pos, length): + + self.queue.append((cid, pos, length)) + + def call(self, cid, pos, length): + "Call the appropriate chunk handler" + + logger.debug("STREAM %r %s %s", cid, pos, length) + return getattr(self, "chunk_" + cid.decode('ascii'))(pos, length) + + def crc(self, cid, data): + "Read and verify checksum" + + # Skip CRC checks for ancillary chunks if allowed to load truncated images + # 5th byte of first char is 1 [specs, section 5.4] + if ImageFile.LOAD_TRUNCATED_IMAGES and (i8(cid[0]) >> 5 & 1): + self.crc_skip(cid, data) + return + + try: + crc1 = _crc32(data, _crc32(cid)) + crc2 = i32(self.fp.read(4)) + if crc1 != crc2: + raise SyntaxError("broken PNG file (bad header checksum in %r)" + % cid) + except struct.error: + raise SyntaxError("broken PNG file (incomplete checksum in %r)" + % cid) + + def crc_skip(self, cid, data): + "Read checksum. Used if the C module is not present" + + self.fp.read(4) + + def verify(self, endchunk=b"IEND"): + + # Simple approach; just calculate checksum for all remaining + # blocks. Must be called directly after open. + + cids = [] + + while True: + try: + cid, pos, length = self.read() + except struct.error: + raise IOError("truncated PNG file") + + if cid == endchunk: + break + self.crc(cid, ImageFile._safe_read(self.fp, length)) + cids.append(cid) + + return cids + + +class iTXt(str): + """ + Subclass of string to allow iTXt chunks to look like strings while + keeping their extra information + + """ + @staticmethod + def __new__(cls, text, lang, tkey): + """ + :param cls: the class to use when creating the instance + :param text: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + """ + + self = str.__new__(cls, text) + self.lang = lang + self.tkey = tkey + return self + + +class PngInfo(object): + """ + PNG chunk container (for use with save(pnginfo=)) + + """ + + def __init__(self): + self.chunks = [] + + def add(self, cid, data): + """Appends an arbitrary chunk. Use with caution. + + :param cid: a byte string, 4 bytes long. + :param data: a byte string of the encoded data + + """ + + self.chunks.append((cid, data)) + + def add_itxt(self, key, value, lang="", tkey="", zip=False): + """Appends an iTXt chunk. 
+ + :param key: latin-1 encodable text key name + :param value: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + :param zip: compression flag + + """ + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + if not isinstance(value, bytes): + value = value.encode("utf-8", "strict") + if not isinstance(lang, bytes): + lang = lang.encode("utf-8", "strict") + if not isinstance(tkey, bytes): + tkey = tkey.encode("utf-8", "strict") + + if zip: + self.add(b"iTXt", key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + + zlib.compress(value)) + else: + self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + + value) + + def add_text(self, key, value, zip=False): + """Appends a text chunk. + + :param key: latin-1 encodable text key name + :param value: value for this key, text or an + :py:class:`PIL.PngImagePlugin.iTXt` instance + :param zip: compression flag + + """ + if isinstance(value, iTXt): + return self.add_itxt(key, value, value.lang, value.tkey, zip=zip) + + # The tEXt chunk stores latin-1 text + if not isinstance(value, bytes): + try: + value = value.encode('latin-1', 'strict') + except UnicodeError: + return self.add_itxt(key, value, zip=zip) + + if not isinstance(key, bytes): + key = key.encode('latin-1', 'strict') + + if zip: + self.add(b"zTXt", key + b"\0\0" + zlib.compress(value)) + else: + self.add(b"tEXt", key + b"\0" + value) + + +# -------------------------------------------------------------------- +# PNG image stream (IHDR/IEND) + +class PngStream(ChunkStream): + + def __init__(self, fp): + + ChunkStream.__init__(self, fp) + + # local copies of Image attributes + self.im_info = {} + self.im_text = {} + self.im_size = (0, 0) + self.im_mode = None + self.im_tile = None + self.im_palette = None + + self.text_memory = 0 + + def check_text_memory(self, chunklen): + self.text_memory += chunklen + if self.text_memory > MAX_TEXT_MEMORY: + raise ValueError("Too much memory used in text chunks: %s>MAX_TEXT_MEMORY" % + self.text_memory) + + def chunk_iCCP(self, pos, length): + + # ICC profile + s = ImageFile._safe_read(self.fp, length) + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + i = s.find(b"\0") + logger.debug("iCCP profile name %r", s[:i]) + logger.debug("Compression method %s", i8(s[i])) + comp_method = i8(s[i]) + if comp_method != 0: + raise SyntaxError("Unknown compression method %s in iCCP chunk" % + comp_method) + try: + icc_profile = _safe_zlib_decompress(s[i+2:]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + icc_profile = None + else: + raise + except zlib.error: + icc_profile = None # FIXME + self.im_info["icc_profile"] = icc_profile + return s + + def chunk_IHDR(self, pos, length): + + # image header + s = ImageFile._safe_read(self.fp, length) + self.im_size = i32(s), i32(s[4:]) + try: + self.im_mode, self.im_rawmode = _MODES[(i8(s[8]), i8(s[9]))] + except: + pass + if i8(s[12]): + self.im_info["interlace"] = 1 + if i8(s[11]): + raise SyntaxError("unknown filter category") + return s + + def chunk_IDAT(self, pos, length): + + # image data + self.im_tile = [("zip", (0, 0)+self.im_size, pos, self.im_rawmode)] + self.im_idat = length + raise EOFError + + def chunk_IEND(self, pos, length): + + # end of PNG image + raise EOFError + + def chunk_PLTE(self, pos, length): + + # palette + s = 
ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + self.im_palette = "RGB", s + return s + + def chunk_tRNS(self, pos, length): + + # transparency + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + if _simple_palette.match(s): + # tRNS contains only one full-transparent entry, + # other entries are full opaque + i = s.find(b"\0") + if i >= 0: + self.im_info["transparency"] = i + else: + # otherwise, we have a byte string with one alpha value + # for each palette entry + self.im_info["transparency"] = s + elif self.im_mode == "L": + self.im_info["transparency"] = i16(s) + elif self.im_mode == "RGB": + self.im_info["transparency"] = i16(s), i16(s[2:]), i16(s[4:]) + return s + + def chunk_gAMA(self, pos, length): + # gamma setting + s = ImageFile._safe_read(self.fp, length) + self.im_info["gamma"] = i32(s) / 100000.0 + return s + + def chunk_cHRM(self, pos, length): + # chromaticity, 8 unsigned ints, actual value is scaled by 100,000 + # WP x,y, Red x,y, Green x,y Blue x,y + + s = ImageFile._safe_read(self.fp, length) + raw_vals = struct.unpack('>%dI' % (len(s) // 4), s) + self.im_info['chromaticity'] = tuple(elt/100000.0 for elt in raw_vals) + return s + + def chunk_sRGB(self, pos, length): + # srgb rendering intent, 1 byte + # 0 perceptual + # 1 relative colorimetric + # 2 saturation + # 3 absolute colorimetric + + s = ImageFile._safe_read(self.fp, length) + self.im_info['srgb'] = i8(s) + return s + + def chunk_pHYs(self, pos, length): + + # pixels per unit + s = ImageFile._safe_read(self.fp, length) + px, py = i32(s), i32(s[4:]) + unit = i8(s[8]) + if unit == 1: # meter + dpi = int(px * 0.0254 + 0.5), int(py * 0.0254 + 0.5) + self.im_info["dpi"] = dpi + elif unit == 0: + self.im_info["aspect"] = px, py + return s + + def chunk_tEXt(self, pos, length): + + # text + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + # fallback for broken tEXt tags + k = s + v = b"" + if k: + if py3: + k = k.decode('latin-1', 'strict') + v = v.decode('latin-1', 'replace') + + self.im_info[k] = self.im_text[k] = v + self.check_text_memory(len(v)) + + return s + + def chunk_zTXt(self, pos, length): + + # compressed text + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + k = s + v = b"" + if v: + comp_method = i8(v[0]) + else: + comp_method = 0 + if comp_method != 0: + raise SyntaxError("Unknown compression method %s in zTXt chunk" % + comp_method) + try: + v = _safe_zlib_decompress(v[1:]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + v = b"" + else: + raise + except zlib.error: + v = b"" + + if k: + if py3: + k = k.decode('latin-1', 'strict') + v = v.decode('latin-1', 'replace') + + self.im_info[k] = self.im_text[k] = v + self.check_text_memory(len(v)) + + return s + + def chunk_iTXt(self, pos, length): + + # international text + r = s = ImageFile._safe_read(self.fp, length) + try: + k, r = r.split(b"\0", 1) + except ValueError: + return s + if len(r) < 2: + return s + cf, cm, r = i8(r[0]), i8(r[1]), r[2:] + try: + lang, tk, v = r.split(b"\0", 2) + except ValueError: + return s + if cf != 0: + if cm == 0: + try: + v = _safe_zlib_decompress(v) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + else: + raise + except zlib.error: + return s + else: + return s + if py3: + try: + k = k.decode("latin-1", "strict") + lang = lang.decode("utf-8", "strict") + tk = tk.decode("utf-8", "strict") + v = v.decode("utf-8", "strict") + except UnicodeError: + 
return s + + self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk) + self.check_text_memory(len(v)) + + return s + + +# -------------------------------------------------------------------- +# PNG reader + +def _accept(prefix): + return prefix[:8] == _MAGIC + + +## +# Image plugin for PNG images. + +class PngImageFile(ImageFile.ImageFile): + + format = "PNG" + format_description = "Portable network graphics" + + def _open(self): + + if self.fp.read(8) != _MAGIC: + raise SyntaxError("not a PNG file") + + # + # Parse headers up to the first IDAT chunk + + self.png = PngStream(self.fp) + + while True: + + # + # get next chunk + + cid, pos, length = self.png.read() + + try: + s = self.png.call(cid, pos, length) + except EOFError: + break + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + + self.png.crc(cid, s) + + # + # Copy relevant attributes from the PngStream. An alternative + # would be to let the PngStream class modify these attributes + # directly, but that introduces circular references which are + # difficult to break if things go wrong in the decoder... + # (believe me, I've tried ;-) + + self.mode = self.png.im_mode + self.size = self.png.im_size + self.info = self.png.im_info + self.text = self.png.im_text # experimental + self.tile = self.png.im_tile + + if self.png.im_palette: + rawmode, data = self.png.im_palette + self.palette = ImagePalette.raw(rawmode, data) + + self.__idat = length # used by load_read() + + def verify(self): + "Verify PNG file" + + if self.fp is None: + raise RuntimeError("verify must be called directly after open") + + # back up to beginning of IDAT block + self.fp.seek(self.tile[0][2] - 8) + + self.png.verify() + self.png.close() + + self.fp = None + + def load_prepare(self): + "internal: prepare to read PNG file" + + if self.info.get("interlace"): + self.decoderconfig = self.decoderconfig + (1,) + + ImageFile.ImageFile.load_prepare(self) + + def load_read(self, read_bytes): + "internal: read more image data" + + while self.__idat == 0: + # end of chunk, skip forward to next one + + self.fp.read(4) # CRC + + cid, pos, length = self.png.read() + + if cid not in [b"IDAT", b"DDAT"]: + self.png.push(cid, pos, length) + return b"" + + self.__idat = length # empty chunks are allowed + + # read more data from this chunk + if read_bytes <= 0: + read_bytes = self.__idat + else: + read_bytes = min(read_bytes, self.__idat) + + self.__idat = self.__idat - read_bytes + + return self.fp.read(read_bytes) + + def load_end(self): + "internal: finished reading image data" + + self.png.close() + self.png = None + + +# -------------------------------------------------------------------- +# PNG writer + +_OUTMODES = { + # supported PIL modes, and corresponding rawmodes/bits/color combinations + "1": ("1", b'\x01\x00'), + "L;1": ("L;1", b'\x01\x00'), + "L;2": ("L;2", b'\x02\x00'), + "L;4": ("L;4", b'\x04\x00'), + "L": ("L", b'\x08\x00'), + "LA": ("LA", b'\x08\x04'), + "I": ("I;16B", b'\x10\x00'), + "P;1": ("P;1", b'\x01\x03'), + "P;2": ("P;2", b'\x02\x03'), + "P;4": ("P;4", b'\x04\x03'), + "P": ("P", b'\x08\x03'), + "RGB": ("RGB", b'\x08\x02'), + "RGBA": ("RGBA", b'\x08\x06'), +} + + +def putchunk(fp, cid, *data): + """Write a PNG chunk (including CRC field)""" + + data = b"".join(data) + + fp.write(o32(len(data)) + cid) + fp.write(data) + crc = _crc32(data, _crc32(cid)) + fp.write(o32(crc)) + + +class _idat(object): + # wrap output from the encoder in IDAT chunks + + def __init__(self, fp, chunk): + 
self.fp = fp + self.chunk = chunk + + def write(self, data): + self.chunk(self.fp, b"IDAT", data) + + +def _save(im, fp, filename, chunk=putchunk): + # save an image to disk (called by the save method) + + mode = im.mode + + if mode == "P": + + # + # attempt to minimize storage requirements for palette images + if "bits" in im.encoderinfo: + # number of bits specified by user + colors = 1 << im.encoderinfo["bits"] + else: + # check palette contents + if im.palette: + colors = max(min(len(im.palette.getdata()[1])//3, 256), 2) + else: + colors = 256 + + if colors <= 2: + bits = 1 + elif colors <= 4: + bits = 2 + elif colors <= 16: + bits = 4 + else: + bits = 8 + if bits != 8: + mode = "%s;%d" % (mode, bits) + + # encoder options + im.encoderconfig = (im.encoderinfo.get("optimize", False), + im.encoderinfo.get("compress_level", -1), + im.encoderinfo.get("compress_type", -1), + im.encoderinfo.get("dictionary", b"")) + + # get the corresponding PNG mode + try: + rawmode, mode = _OUTMODES[mode] + except KeyError: + raise IOError("cannot write mode %s as PNG" % mode) + + # + # write minimal PNG file + + fp.write(_MAGIC) + + chunk(fp, b"IHDR", + o32(im.size[0]), o32(im.size[1]), # 0: size + mode, # 8: depth/type + b'\0', # 10: compression + b'\0', # 11: filter category + b'\0') # 12: interlace flag + + chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"] + + icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + # ICC profile + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + name = b"ICC Profile" + data = name + b"\0\0" + zlib.compress(icc) + chunk(fp, b"iCCP", data) + + # You must either have sRGB or iCCP. + # Disallow sRGB chunks when an iCCP-chunk has been emitted. + chunks.remove(b"sRGB") + + info = im.encoderinfo.get("pnginfo") + if info: + chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] + for cid, data in info.chunks: + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + elif cid in chunks_multiple_allowed: + chunk(fp, cid, data) + + if im.mode == "P": + palette_byte_number = (2 ** bits) * 3 + palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] + while len(palette_bytes) < palette_byte_number: + palette_bytes += b'\0' + chunk(fp, b"PLTE", palette_bytes) + + transparency = im.encoderinfo.get('transparency', + im.info.get('transparency', None)) + + if transparency or transparency == 0: + if im.mode == "P": + # limit to actual palette size + alpha_bytes = 2**bits + if isinstance(transparency, bytes): + chunk(fp, b"tRNS", transparency[:alpha_bytes]) + else: + transparency = max(0, min(255, transparency)) + alpha = b'\xFF' * transparency + b'\0' + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + elif im.mode == "L": + transparency = max(0, min(65535, transparency)) + chunk(fp, b"tRNS", o16(transparency)) + elif im.mode == "RGB": + red, green, blue = transparency + chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) + else: + if "transparency" in im.encoderinfo: + # don't bother with transparency if it's an RGBA + # and it's in the info dict. It's probably just stale. 
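# --- Illustrative aside (not part of the vendored file) -----------------
# putchunk() above frames every chunk identically:
#     4-byte big-endian length | 4-byte chunk type | data | 4-byte CRC,
# with the CRC computed over type + data exactly as _crc32() does.  A single
# chunk could therefore be assembled by hand as
#     o32(len(data)) + cid + data + o32(_crc32(data, _crc32(cid)))
# which is what both the IDAT wrapper (_idat) and every chunk() call made
# inside _save() ultimately go through.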
+ raise IOError("cannot use transparency for this mode") + else: + if im.mode == "P" and im.im.getpalettemode() == "RGBA": + alpha = im.im.getpalette("RGBA", "A") + alpha_bytes = 2**bits + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + + dpi = im.encoderinfo.get("dpi") + if dpi: + chunk(fp, b"pHYs", + o32(int(dpi[0] / 0.0254 + 0.5)), + o32(int(dpi[1] / 0.0254 + 0.5)), + b'\x01') + + info = im.encoderinfo.get("pnginfo") + if info: + chunks = [b"bKGD", b"hIST"] + for cid, data in info.chunks: + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + + ImageFile._save(im, _idat(fp, chunk), + [("zip", (0, 0)+im.size, 0, rawmode)]) + + chunk(fp, b"IEND", b"") + + if hasattr(fp, "flush"): + fp.flush() + + +# -------------------------------------------------------------------- +# PNG chunk converter + +def getchunks(im, **params): + """Return a list of PNG chunks representing this image.""" + + class collector(object): + data = [] + + def write(self, data): + pass + + def append(self, chunk): + self.data.append(chunk) + + def append(fp, cid, *data): + data = b"".join(data) + crc = o32(_crc32(data, _crc32(cid))) + fp.append((cid, data, crc)) + + fp = collector() + + try: + im.encoderinfo = params + _save(im, fp, None, append) + finally: + del im.encoderinfo + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(PngImageFile.format, PngImageFile, _accept) +Image.register_save(PngImageFile.format, _save) + +Image.register_extension(PngImageFile.format, ".png") + +Image.register_mime(PngImageFile.format, "image/png") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dad16acd617d6a9861b4ffe0348a413411898aa1 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PngImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..c599ba8d53e6a019a5de61c23f3f6f5f0b4040f5 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.py @@ -0,0 +1,157 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PPM support for PIL +# +# History: +# 96-03-24 fl Created +# 98-03-06 fl Write RGBA images (as RGB, that is) +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile + +__version__ = "0.2" + +# +# -------------------------------------------------------------------- + +b_whitespace = b'\x20\x09\x0a\x0b\x0c\x0d' + +MODES = { + # standard + b"P4": "1", + b"P5": "L", + b"P6": "RGB", + # extensions + b"P0CMYK": "CMYK", + # PIL extensions (for test purposes only) + b"PyP": "P", + b"PyRGBA": "RGBA", + b"PyCMYK": "CMYK" +} + + +def _accept(prefix): + return prefix[0:1] == b"P" and prefix[1] in b"0456y" + + +## +# Image plugin for PBM, PGM, and PPM images. 
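# --- Illustrative sketch (hypothetical helper, not part of the vendored file) ---
# PpmImageFile._open() below tokenizes the header byte by byte; for binary
# PGM/PPM files (P5/P6) the same fields can be pulled out with one regex,
# ignoring '#' comment lines for brevity:

import re as _re

def _sketch_ppm_header(data):
    # e.g. _sketch_ppm_header(b"P6\n640 480\n255\n") -> (b"P6", 640, 480, 255)
    m = _re.match(br"(P[56])\s+(\d+)\s+(\d+)\s+(\d+)\s", data)
    if not m:
        raise ValueError("not a binary PGM/PPM header")
    return m.group(1), int(m.group(2)), int(m.group(3)), int(m.group(4))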
+ +class PpmImageFile(ImageFile.ImageFile): + + format = "PPM" + format_description = "Pbmplus image" + + def _token(self, s=b""): + while True: # read until next whitespace + c = self.fp.read(1) + if not c or c in b_whitespace: + break + if c > b'\x79': + raise ValueError("Expected ASCII value, found binary") + s = s + c + if (len(s) > 9): + raise ValueError("Expected int, got > 9 digits") + return s + + def _open(self): + + # check magic + s = self.fp.read(1) + if s != b"P": + raise SyntaxError("not a PPM file") + mode = MODES[self._token(s)] + + if mode == "1": + self.mode = "1" + rawmode = "1;I" + else: + self.mode = rawmode = mode + + for ix in range(3): + while True: + while True: + s = self.fp.read(1) + if s not in b_whitespace: + break + if s == b"": + raise ValueError("File does not extend beyond magic number") + if s != b"#": + break + s = self.fp.readline() + s = int(self._token(s)) + if ix == 0: + xsize = s + elif ix == 1: + ysize = s + if mode == "1": + break + elif ix == 2: + # maxgrey + if s > 255: + if not mode == 'L': + raise ValueError("Too many colors for band: %s" % s) + if s < 2**16: + self.mode = 'I' + rawmode = 'I;16B' + else: + self.mode = 'I' + rawmode = 'I;32B' + + self.size = xsize, ysize + self.tile = [("raw", + (0, 0, xsize, ysize), + self.fp.tell(), + (rawmode, 0, 1))] + + +# +# -------------------------------------------------------------------- + +def _save(im, fp, filename): + if im.mode == "1": + rawmode, head = "1;I", b"P4" + elif im.mode == "L": + rawmode, head = "L", b"P5" + elif im.mode == "I": + if im.getextrema()[1] < 2**16: + rawmode, head = "I;16B", b"P5" + else: + rawmode, head = "I;32B", b"P5" + elif im.mode == "RGB": + rawmode, head = "RGB", b"P6" + elif im.mode == "RGBA": + rawmode, head = "RGB", b"P6" + else: + raise IOError("cannot write mode %s as PPM" % im.mode) + fp.write(head + ("\n%d %d\n" % im.size).encode('ascii')) + if head == b"P6": + fp.write(b"255\n") + if head == b"P5": + if rawmode == "L": + fp.write(b"255\n") + elif rawmode == "I;16B": + fp.write(b"65535\n") + elif rawmode == "I;32B": + fp.write(b"2147483648\n") + ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, 1))]) + + # ALTERNATIVE: save via builtin debug function + # im._dump(filename) + +# +# -------------------------------------------------------------------- + + +Image.register_open(PpmImageFile.format, PpmImageFile, _accept) +Image.register_save(PpmImageFile.format, _save) + +Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm"]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04308e5c9fbc6c1a6acec714bfcb5951b50dda88 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PpmImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e04f78b340f735d7c49794d8848140b411dace --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.py @@ -0,0 +1,307 @@ +# +# The Python Imaging Library +# $Id$ +# +# Adobe PSD 2.5/3.0 file handling +# +# History: +# 1995-09-01 fl Created +# 1997-01-03 fl Read most PSD images +# 1997-01-18 fl Fixed P and CMYK support +# 2001-10-21 fl Added seek/tell support 
(for layers) +# +# Copyright (c) 1997-2001 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +__version__ = "0.4" + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16be as i16, i32be as i32 + +MODES = { + # (photoshop mode, bits) -> (pil mode, required channels) + (0, 1): ("1", 1), + (0, 8): ("L", 1), + (1, 8): ("L", 1), + (2, 8): ("P", 1), + (3, 8): ("RGB", 3), + (4, 8): ("CMYK", 4), + (7, 8): ("L", 1), # FIXME: multilayer + (8, 8): ("L", 1), # duotone + (9, 8): ("LAB", 3) +} + + +# --------------------------------------------------------------------. +# read PSD images + +def _accept(prefix): + return prefix[:4] == b"8BPS" + + +## +# Image plugin for Photoshop images. + +class PsdImageFile(ImageFile.ImageFile): + + format = "PSD" + format_description = "Adobe Photoshop" + + def _open(self): + + read = self.fp.read + + # + # header + + s = read(26) + if s[:4] != b"8BPS" or i16(s[4:]) != 1: + raise SyntaxError("not a PSD file") + + psd_bits = i16(s[22:]) + psd_channels = i16(s[12:]) + psd_mode = i16(s[24:]) + + mode, channels = MODES[(psd_mode, psd_bits)] + + if channels > psd_channels: + raise IOError("not enough channels") + + self.mode = mode + self.size = i32(s[18:]), i32(s[14:]) + + # + # color mode data + + size = i32(read(4)) + if size: + data = read(size) + if mode == "P" and size == 768: + self.palette = ImagePalette.raw("RGB;L", data) + + # + # image resources + + self.resources = [] + + size = i32(read(4)) + if size: + # load resources + end = self.fp.tell() + size + while self.fp.tell() < end: + signature = read(4) + id = i16(read(2)) + name = read(i8(read(1))) + if not (len(name) & 1): + read(1) # padding + data = read(i32(read(4))) + if (len(data) & 1): + read(1) # padding + self.resources.append((id, name, data)) + if id == 1039: # ICC profile + self.info["icc_profile"] = data + + # + # layer and mask information + + self.layers = [] + + size = i32(read(4)) + if size: + end = self.fp.tell() + size + size = i32(read(4)) + if size: + self.layers = _layerinfo(self.fp) + self.fp.seek(end) + + # + # image descriptor + + self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels) + + # keep the file open + self._fp = self.fp + self.frame = 1 + self._min_frame = 1 + + @property + def n_frames(self): + return len(self.layers) + + @property + def is_animated(self): + return len(self.layers) > 1 + + def seek(self, layer): + if not self._seek_check(layer): + return + + # seek to given layer (1..max) + try: + name, mode, bbox, tile = self.layers[layer-1] + self.mode = mode + self.tile = tile + self.frame = layer + self.fp = self._fp + return name, bbox + except IndexError: + raise EOFError("no such layer") + + def tell(self): + # return layer number (0=image, 1..max=layers) + return self.frame + + def load_prepare(self): + # create image memory if necessary + if not self.im or\ + self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.fill(self.mode, self.size, 0) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + +def _layerinfo(file): + # read layerinfo block + layers = [] + read = file.read + for i in range(abs(i16(read(2)))): + + # bounding box + y0 = i32(read(4)) + x0 = i32(read(4)) + y1 = i32(read(4)) + x1 = i32(read(4)) + + # image info + info = [] + mode = [] + types = list(range(i16(read(2)))) + if len(types) > 4: + continue + + for i in types: + type = i16(read(2)) + + if type == 65535: + m = "A" + else: 
+ m = "RGBA"[type] + + mode.append(m) + size = i32(read(4)) + info.append((m, size)) + + # figure out the image mode + mode.sort() + if mode == ["R"]: + mode = "L" + elif mode == ["B", "G", "R"]: + mode = "RGB" + elif mode == ["A", "B", "G", "R"]: + mode = "RGBA" + else: + mode = None # unknown + + # skip over blend flags and extra information + filler = read(12) + name = "" + size = i32(read(4)) + combined = 0 + if size: + length = i32(read(4)) + if length: + mask_y = i32(read(4)) + mask_x = i32(read(4)) + mask_h = i32(read(4)) - mask_y + mask_w = i32(read(4)) - mask_x + file.seek(length - 16, 1) + combined += length + 4 + + length = i32(read(4)) + if length: + file.seek(length, 1) + combined += length + 4 + + length = i8(read(1)) + if length: + # Don't know the proper encoding, + # Latin-1 should be a good guess + name = read(length).decode('latin-1', 'replace') + combined += length + 1 + + file.seek(size - combined, 1) + layers.append((name, mode, (x0, y0, x1, y1))) + + # get tiles + i = 0 + for name, mode, bbox in layers: + tile = [] + for m in mode: + t = _maketile(file, m, bbox, 1) + if t: + tile.extend(t) + layers[i] = name, mode, bbox, tile + i += 1 + + return layers + + +def _maketile(file, mode, bbox, channels): + + tile = None + read = file.read + + compression = i16(read(2)) + + xsize = bbox[2] - bbox[0] + ysize = bbox[3] - bbox[1] + + offset = file.tell() + + if compression == 0: + # + # raw compression + tile = [] + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append(("raw", bbox, offset, layer)) + offset = offset + xsize*ysize + + elif compression == 1: + # + # packbits compression + i = 0 + tile = [] + bytecount = read(channels * ysize * 2) + offset = file.tell() + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append( + ("packbits", bbox, offset, layer) + ) + for y in range(ysize): + offset = offset + i16(bytecount[i:i+2]) + i += 2 + + file.seek(offset) + + if offset & 1: + read(1) # padding + + return tile + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PsdImageFile.format, PsdImageFile, _accept) + +Image.register_extension(PsdImageFile.format, ".psd") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec5389405c57b678f9dd202063d0c31facd60d9a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PsdImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.py new file mode 100644 index 0000000000000000000000000000000000000000..cce2de2b8113300401e82b0eb048282d746ed1c8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.py @@ -0,0 +1,319 @@ +# +# The Python Imaging Library +# Pillow fork +# +# Python implementation of the PixelAccess Object +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# Copyright (c) 2013 Eric Soroos +# +# See the README file for information on usage and redistribution +# + +# Notes: +# +# * Implements the pixel access object following Access. 
+# * Does not implement the line functions, as they don't appear to be used +# * Taking only the tuple form, which is used from python. +# * Fill.c uses the integer form, but it's still going to use the old +# Access.c implementation. +# + +import logging +import sys + +from cffi import FFI + + +logger = logging.getLogger(__name__) + + +defs = """ +struct Pixel_RGBA { + unsigned char r,g,b,a; +}; +struct Pixel_I16 { + unsigned char l,r; +}; +""" +ffi = FFI() +ffi.cdef(defs) + + +class PyAccess(object): + + def __init__(self, img, readonly=False): + vals = dict(img.im.unsafe_ptrs) + self.readonly = readonly + self.image8 = ffi.cast('unsigned char **', vals['image8']) + self.image32 = ffi.cast('int **', vals['image32']) + self.image = ffi.cast('unsigned char **', vals['image']) + self.xsize, self.ysize = img.im.size + + # Keep pointer to im object to prevent dereferencing. + self._im = img.im + + # Debugging is polluting test traces, only useful here + # when hacking on PyAccess + # logger.debug("%s", vals) + self._post_init() + + def _post_init(self): + pass + + def __setitem__(self, xy, color): + """ + Modifies the pixel at x,y. The color is given as a single + numerical value for single band images, and a tuple for + multi-band images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param color: The pixel value. + """ + if self.readonly: + raise ValueError('Attempt to putpixel a read only image') + (x, y) = self.check_xy(xy) + return self.set_pixel(x, y, color) + + def __getitem__(self, xy): + """ + Returns the pixel at x,y. The pixel is returned as a single + value for single band images or a tuple for multiple band + images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: a pixel value for single band images, a tuple of + pixel values for multiband images. 
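        A hypothetical usage sketch, assuming the cffi access path is active::

            px = im.load()       # returns a PyAccess instance on this path
            value = px[0, 0]     # dispatches to get_pixel() above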
+ """ + + (x, y) = self.check_xy(xy) + return self.get_pixel(x, y) + + putpixel = __setitem__ + getpixel = __getitem__ + + def check_xy(self, xy): + (x, y) = xy + if not (0 <= x < self.xsize and 0 <= y < self.ysize): + raise ValueError('pixel location out of range') + return xy + + +class _PyAccess32_2(PyAccess): + """ PA, LA, stored in first and last bytes of a 32 bit word """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.a = min(color[1], 255) + + +class _PyAccess32_3(PyAccess): + """ RGB and friends, stored in the first three bytes of a 32 bit word """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = 255 + + +class _PyAccess32_4(PyAccess): + """ RGBA etc, all 4 bytes of a 32 bit word """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = min(color[3], 255) + + +class _PyAccess8(PyAccess): + """ 1, L, P, 8 bit images stored as uint8 """ + def _post_init(self, *args, **kwargs): + self.pixels = self.image8 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 255) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 255) + + +class _PyAccessI16_N(PyAccess): + """ I;16 access, native bitendian without conversion """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast('unsigned short **', self.image) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 65535) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 65535) + + +class _PyAccessI16_L(PyAccess): + """ I;16L access, with conversion """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast('struct Pixel_I16 **', self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l + pixel.r * 256 + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except TypeError: + color = min(color[0], 65535) + + pixel.l = color & 0xFF + pixel.r = color >> 8 + + +class _PyAccessI16_B(PyAccess): + """ I;16B access, with conversion """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast('struct Pixel_I16 **', self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l * 256 + pixel.r + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except: + color = min(color[0], 65535) + + pixel.l = color >> 8 + pixel.r = color & 0xFF + + +class _PyAccessI32_N(PyAccess): + """ Signed Int32 access, native endian """ + def _post_init(self, *args, 
**kwargs): + self.pixels = self.image32 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + self.pixels[y][x] = color + + +class _PyAccessI32_Swap(PyAccess): + """ I;32L/B access, with byteswapping conversion """ + def _post_init(self, *args, **kwargs): + self.pixels = self.image32 + + def reverse(self, i): + orig = ffi.new('int *', i) + chars = ffi.cast('unsigned char *', orig) + chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], \ + chars[1], chars[0] + return ffi.cast('int *', chars)[0] + + def get_pixel(self, x, y): + return self.reverse(self.pixels[y][x]) + + def set_pixel(self, x, y, color): + self.pixels[y][x] = self.reverse(color) + + +class _PyAccessF(PyAccess): + """ 32 bit float access """ + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast('float **', self.image32) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # not a tuple + self.pixels[y][x] = color + except TypeError: + # tuple + self.pixels[y][x] = color[0] + + +mode_map = {'1': _PyAccess8, + 'L': _PyAccess8, + 'P': _PyAccess8, + 'LA': _PyAccess32_2, + 'La': _PyAccess32_2, + 'PA': _PyAccess32_2, + 'RGB': _PyAccess32_3, + 'LAB': _PyAccess32_3, + 'HSV': _PyAccess32_3, + 'YCbCr': _PyAccess32_3, + 'RGBA': _PyAccess32_4, + 'RGBa': _PyAccess32_4, + 'RGBX': _PyAccess32_4, + 'CMYK': _PyAccess32_4, + 'F': _PyAccessF, + 'I': _PyAccessI32_N, + } + +if sys.byteorder == 'little': + mode_map['I;16'] = _PyAccessI16_N + mode_map['I;16L'] = _PyAccessI16_N + mode_map['I;16B'] = _PyAccessI16_B + + mode_map['I;32L'] = _PyAccessI32_N + mode_map['I;32B'] = _PyAccessI32_Swap +else: + mode_map['I;16'] = _PyAccessI16_L + mode_map['I;16L'] = _PyAccessI16_L + mode_map['I;16B'] = _PyAccessI16_N + + mode_map['I;32L'] = _PyAccessI32_Swap + mode_map['I;32B'] = _PyAccessI32_N + + +def new(img, readonly=False): + access_type = mode_map.get(img.mode, None) + if not access_type: + logger.debug("PyAccess Not Implemented: %s", img.mode) + return None + return access_type(img, readonly) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e83545ffeb0478f08e3bbc9800741e845c1c3de9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/PyAccess.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0f40ebd6267d9c6ea1e88ba2b5d42327103965 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.py @@ -0,0 +1,227 @@ +# +# The Python Imaging Library. +# $Id$ +# +# SGI image file handling +# +# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli. +# +# +# +# History: +# 2017-22-07 mb Add RLE decompression +# 2016-16-10 mb Add save method without compression +# 1995-09-10 fl Created +# +# Copyright (c) 2016 by Mickael Bonfill. +# Copyright (c) 2008 by Karsten Hiddemann. +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1995 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +from . 
import Image, ImageFile +from ._binary import i8, o8, i16be as i16 +from ._util import py3 +import struct +import os + + +__version__ = "0.3" + + +def _accept(prefix): + return len(prefix) >= 2 and i16(prefix) == 474 + + +MODES = { + (1, 1, 1): "L", + (1, 2, 1): "L", + (2, 1, 1): "L;16B", + (2, 2, 1): "L;16B", + (1, 3, 3): "RGB", + (2, 3, 3): "RGB;16B", + (1, 3, 4): "RGBA", + (2, 3, 4): "RGBA;16B" +} + + +## +# Image plugin for SGI images. +class SgiImageFile(ImageFile.ImageFile): + + format = "SGI" + format_description = "SGI Image File Format" + + def _open(self): + + # HEAD + headlen = 512 + s = self.fp.read(headlen) + + # magic number : 474 + if i16(s) != 474: + raise ValueError("Not an SGI image file") + + # compression : verbatim or RLE + compression = i8(s[2]) + + # bpc : 1 or 2 bytes (8bits or 16bits) + bpc = i8(s[3]) + + # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize) + dimension = i16(s[4:]) + + # xsize : width + xsize = i16(s[6:]) + + # ysize : height + ysize = i16(s[8:]) + + # zsize : channels count + zsize = i16(s[10:]) + + # layout + layout = bpc, dimension, zsize + + # determine mode from bits/zsize + rawmode = "" + try: + rawmode = MODES[layout] + except KeyError: + pass + + if rawmode == "": + raise ValueError("Unsupported SGI image mode") + + self.size = xsize, ysize + self.mode = rawmode.split(";")[0] + + # orientation -1 : scanlines begins at the bottom-left corner + orientation = -1 + + # decoder info + if compression == 0: + pagesize = xsize * ysize * bpc + if bpc == 2: + self.tile = [("SGI16", (0, 0) + self.size, + headlen, (self.mode, 0, orientation))] + else: + self.tile = [] + offset = headlen + for layer in self.mode: + self.tile.append( + ("raw", (0, 0) + self.size, + offset, (layer, 0, orientation))) + offset += pagesize + elif compression == 1: + self.tile = [("sgi_rle", (0, 0) + self.size, + headlen, (rawmode, orientation, bpc))] + + +def _save(im, fp, filename): + if im.mode != "RGB" and im.mode != "RGBA" and im.mode != "L": + raise ValueError("Unsupported SGI image mode") + + # Get the keyword arguments + info = im.encoderinfo + + # Byte-per-pixel precision, 1 = 8bits per pixel + bpc = info.get("bpc", 1) + + if bpc not in (1, 2): + raise ValueError("Unsupported number of bytes per pixel") + + # Flip the image, since the origin of SGI file is the bottom-left corner + orientation = -1 + # Define the file as SGI File Format + magicNumber = 474 + # Run-Length Encoding Compression - Unsupported at this time + rle = 0 + + # Number of dimensions (x,y,z) + dim = 3 + # X Dimension = width / Y Dimension = height + x, y = im.size + if im.mode == "L" and y == 1: + dim = 1 + elif im.mode == "L": + dim = 2 + # Z Dimension: Number of channels + z = len(im.mode) + + if dim == 1 or dim == 2: + z = 1 + + # assert we've got the right number of bands. 
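# A minimal usage sketch (not part of the upstream module), assuming the register_save /
# register_extensions calls at the end of this file have run; filenames are hypothetical.
# Saving through the normal Pillow API reaches this _save(), and the optional "bpc"
# keyword below is read from encoderinfo:
#
#     from PIL import Image
#     Image.open("photo.png").convert("RGB").save("photo.sgi", bpc=1)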
+ if len(im.getbands()) != z: + raise ValueError("incorrect number of bands in SGI write: %s vs %s" % + (z, len(im.getbands()))) + + # Minimum Byte value + pinmin = 0 + # Maximum Byte value (255 = 8bits per pixel) + pinmax = 255 + # Image name (79 characters max, truncated below in write) + imgName = os.path.splitext(os.path.basename(filename))[0] + if py3: + imgName = imgName.encode('ascii', 'ignore') + # Standard representation of pixel in the file + colormap = 0 + fp.write(struct.pack('>h', magicNumber)) + fp.write(o8(rle)) + fp.write(o8(bpc)) + fp.write(struct.pack('>H', dim)) + fp.write(struct.pack('>H', x)) + fp.write(struct.pack('>H', y)) + fp.write(struct.pack('>H', z)) + fp.write(struct.pack('>l', pinmin)) + fp.write(struct.pack('>l', pinmax)) + fp.write(struct.pack('4s', b'')) # dummy + fp.write(struct.pack('79s', imgName)) # truncates to 79 chars + fp.write(struct.pack('s', b'')) # force null byte after imgname + fp.write(struct.pack('>l', colormap)) + fp.write(struct.pack('404s', b'')) # dummy + + rawmode = 'L' + if bpc == 2: + rawmode = 'L;16B' + + for channel in im.split(): + fp.write(channel.tobytes('raw', rawmode, 0, orientation)) + + fp.close() + + +class SGI16Decoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer): + rawmode, stride, orientation = self.args + pagesize = self.state.xsize * self.state.ysize + zsize = len(self.mode) + self.fd.seek(512) + + for band in range(zsize): + channel = Image.new('L', (self.state.xsize, self.state.ysize)) + channel.frombytes(self.fd.read(2 * pagesize), 'raw', + 'L;16B', stride, orientation) + self.im.putband(channel.im, band) + + return -1, 0 + +# +# registry + + +Image.register_decoder("SGI16", SGI16Decoder) +Image.register_open(SgiImageFile.format, SgiImageFile, _accept) +Image.register_save(SgiImageFile.format, _save) +Image.register_mime(SgiImageFile.format, "image/sgi") +Image.register_mime(SgiImageFile.format, "image/rgb") + +Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) + +# End of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8dca6e81ad1f61bf3d7b2e9bb51273dfbce0b53 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SgiImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..d502779e2a30b970596e668e299662594ca83bd9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.py @@ -0,0 +1,314 @@ +# +# The Python Imaging Library. +# +# SPIDER image file handling +# +# History: +# 2004-08-02 Created BB +# 2006-03-02 added save method +# 2006-03-13 added support for stack images +# +# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144. +# Copyright (c) 2004 by William Baxter. +# Copyright (c) 2004 by Secret Labs AB. +# Copyright (c) 2004 by Fredrik Lundh. +# + +## +# Image plugin for the Spider image format. This format is is used +# by the SPIDER software, in processing image data from electron +# microscopy and tomography. +## + +# +# SpiderImagePlugin.py +# +# The Spider image format is used by SPIDER software, in processing +# image data from electron microscopy and tomography. 
+# +# Spider home page: +# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html +# +# Details about the Spider image format: +# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html +# + +from __future__ import print_function + +from PIL import Image, ImageFile +import os +import struct +import sys + + +def isInt(f): + try: + i = int(f) + if f-i == 0: + return 1 + else: + return 0 + except (ValueError, OverflowError): + return 0 + + +iforms = [1, 3, -11, -12, -21, -22] + + +# There is no magic number to identify Spider files, so just check a +# series of header locations to see if they have reasonable values. +# Returns no. of bytes in the header, if it is a valid Spider header, +# otherwise returns 0 + +def isSpiderHeader(t): + h = (99,) + t # add 1 value so can use spider header index start=1 + # header values 1,2,5,12,13,22,23 should be integers + for i in [1, 2, 5, 12, 13, 22, 23]: + if not isInt(h[i]): + return 0 + # check iform + iform = int(h[5]) + if iform not in iforms: + return 0 + # check other header values + labrec = int(h[13]) # no. records in file header + labbyt = int(h[22]) # total no. of bytes in header + lenbyt = int(h[23]) # record length in bytes + # print("labrec = %d, labbyt = %d, lenbyt = %d" % (labrec,labbyt,lenbyt)) + if labbyt != (labrec * lenbyt): + return 0 + # looks like a valid header + return labbyt + + +def isSpiderImage(filename): + with open(filename, 'rb') as fp: + f = fp.read(92) # read 23 * 4 bytes + t = struct.unpack('>23f', f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + t = struct.unpack('<23f', f) # little-endian + hdrlen = isSpiderHeader(t) + return hdrlen + + +class SpiderImageFile(ImageFile.ImageFile): + + format = "SPIDER" + format_description = "Spider 2D image" + _close_exclusive_fp_after_loading = False + + def _open(self): + # check header + n = 27 * 4 # read 27 float values + f = self.fp.read(n) + + try: + self.bigendian = 1 + t = struct.unpack('>27f', f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + self.bigendian = 0 + t = struct.unpack('<27f', f) # little-endian + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + raise SyntaxError("not a valid Spider file") + except struct.error: + raise SyntaxError("not a valid Spider file") + + h = (99,) + t # add 1 value : spider header index starts at 1 + iform = int(h[5]) + if iform != 1: + raise SyntaxError("not a Spider 2D image") + + self.size = int(h[12]), int(h[2]) # size in pixels (width, height) + self.istack = int(h[24]) + self.imgnumber = int(h[27]) + + if self.istack == 0 and self.imgnumber == 0: + # stk=0, img=0: a regular 2D image + offset = hdrlen + self._nimages = 1 + elif self.istack > 0 and self.imgnumber == 0: + # stk>0, img=0: Opening the stack for the first time + self.imgbytes = int(h[12]) * int(h[2]) * 4 + self.hdrlen = hdrlen + self._nimages = int(h[26]) + # Point to the first image in the stack + offset = hdrlen * 2 + self.imgnumber = 1 + elif self.istack == 0 and self.imgnumber > 0: + # stk=0, img>0: an image within the stack + offset = hdrlen + self.stkoffset + self.istack = 2 # So Image knows it's still a stack + else: + raise SyntaxError("inconsistent stack header values") + + if self.bigendian: + self.rawmode = "F;32BF" + else: + self.rawmode = "F;32F" + self.mode = "F" + + self.tile = [ + ("raw", (0, 0) + self.size, offset, + (self.rawmode, 0, 1))] + self.__fp = self.fp # FIXME: hack + + @property + def n_frames(self): + return self._nimages + + @property + def is_animated(self): + return 
self._nimages > 1 + + # 1st image index is zero (although SPIDER imgnumber starts at 1) + def tell(self): + if self.imgnumber < 1: + return 0 + else: + return self.imgnumber - 1 + + def seek(self, frame): + if self.istack == 0: + raise EOFError("attempt to seek in a non-stack file") + if not self._seek_check(frame): + return + self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes) + self.fp = self.__fp + self.fp.seek(self.stkoffset) + self._open() + + # returns a byte image after rescaling to 0..255 + def convert2byte(self, depth=255): + (minimum, maximum) = self.getextrema() + m = 1 + if maximum != minimum: + m = depth / (maximum-minimum) + b = -m * minimum + return self.point(lambda i, m=m, b=b: i * m + b).convert("L") + + # returns a ImageTk.PhotoImage object, after rescaling to 0..255 + def tkPhotoImage(self): + from PIL import ImageTk + return ImageTk.PhotoImage(self.convert2byte(), palette=256) + + +# -------------------------------------------------------------------- +# Image series + +# given a list of filenames, return a list of images +def loadImageSeries(filelist=None): + """create a list of Image.images for use in montage""" + if filelist is None or len(filelist) < 1: + return + + imglist = [] + for img in filelist: + if not os.path.exists(img): + print("unable to find %s" % img) + continue + try: + im = Image.open(img).convert2byte() + except: + if not isSpiderImage(img): + print(img + " is not a Spider image file") + continue + im.info['filename'] = img + imglist.append(im) + return imglist + + +# -------------------------------------------------------------------- +# For saving images in Spider format + +def makeSpiderHeader(im): + nsam, nrow = im.size + lenbyt = nsam * 4 # There are labrec records in the header + labrec = 1024 / lenbyt + if 1024 % lenbyt != 0: + labrec += 1 + labbyt = labrec * lenbyt + hdr = [] + nvalues = int(labbyt / 4) + for i in range(nvalues): + hdr.append(0.0) + + if len(hdr) < 23: + return [] + + # NB these are Fortran indices + hdr[1] = 1.0 # nslice (=1 for an image) + hdr[2] = float(nrow) # number of rows per slice + hdr[5] = 1.0 # iform for 2D image + hdr[12] = float(nsam) # number of pixels per line + hdr[13] = float(labrec) # number of records in file header + hdr[22] = float(labbyt) # total number of bytes in header + hdr[23] = float(lenbyt) # record length in bytes + + # adjust for Fortran indexing + hdr = hdr[1:] + hdr.append(0.0) + # pack binary data into a string + hdrstr = [] + for v in hdr: + hdrstr.append(struct.pack('f', v)) + return hdrstr + + +def _save(im, fp, filename): + if im.mode[0] != "F": + im = im.convert('F') + + hdr = makeSpiderHeader(im) + if len(hdr) < 256: + raise IOError("Error creating Spider header") + + # write the SPIDER header + fp.writelines(hdr) + + rawmode = "F;32NF" # 32-bit native floating point + ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, 1))]) + + +def _save_spider(im, fp, filename): + # get the filename extension and register it with Image + ext = os.path.splitext(filename)[1] + Image.register_extension(SpiderImageFile.format, ext) + _save(im, fp, filename) + +# -------------------------------------------------------------------- + + +Image.register_open(SpiderImageFile.format, SpiderImageFile) +Image.register_save(SpiderImageFile.format, _save_spider) + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python SpiderImagePlugin.py [infile] [outfile]") + sys.exit() + + filename = sys.argv[1] + if not isSpiderImage(filename): + print("input image must 
be in Spider format") + sys.exit() + + im = Image.open(filename) + print("image: " + str(im)) + print("format: " + str(im.format)) + print("size: " + str(im.size)) + print("mode: " + str(im.mode)) + print("max, min: ", end=' ') + print(im.getextrema()) + + if len(sys.argv) > 2: + outfile = sys.argv[2] + + # perform some image operation + im = im.transpose(Image.FLIP_LEFT_RIGHT) + print( + "saving a flipped version of %s as %s " % + (os.path.basename(filename), outfile)) + im.save(outfile, SpiderImageFile.format) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60a74727c39e73bf236664b89f41b197b9febb2b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SpiderImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..fd5e82724df892a0b8fc00bf28a22cf4b593aa28 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.py @@ -0,0 +1,136 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Sun image file handling +# +# History: +# 1995-09-10 fl Created +# 1996-05-28 fl Fixed 32-bit alignment +# 1998-12-29 fl Import ImagePalette module +# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault) +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995-1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile, ImagePalette +from ._binary import i32be as i32 + +__version__ = "0.3" + + +def _accept(prefix): + return len(prefix) >= 4 and i32(prefix) == 0x59a66a95 + + +## +# Image plugin for Sun raster files. + +class SunImageFile(ImageFile.ImageFile): + + format = "SUN" + format_description = "Sun Raster File" + + def _open(self): + + # The Sun Raster file header is 32 bytes in length + # and has the following format: + + # typedef struct _SunRaster + # { + # DWORD MagicNumber; /* Magic (identification) number */ + # DWORD Width; /* Width of image in pixels */ + # DWORD Height; /* Height of image in pixels */ + # DWORD Depth; /* Number of bits per pixel */ + # DWORD Length; /* Size of image data in bytes */ + # DWORD Type; /* Type of raster file */ + # DWORD ColorMapType; /* Type of color map */ + # DWORD ColorMapLength; /* Size of the color map in bytes */ + # } SUNRASTER; + + # HEAD + s = self.fp.read(32) + if i32(s) != 0x59a66a95: + raise SyntaxError("not an SUN raster file") + + offset = 32 + + self.size = i32(s[4:8]), i32(s[8:12]) + + depth = i32(s[12:16]) + data_length = i32(s[16:20]) # unreliable, ignore. 
+ file_type = i32(s[20:24]) + palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary + palette_length = i32(s[28:32]) + + if depth == 1: + self.mode, rawmode = "1", "1;I" + elif depth == 4: + self.mode, rawmode = "L", "L;4" + elif depth == 8: + self.mode = rawmode = "L" + elif depth == 24: + if file_type == 3: + self.mode, rawmode = "RGB", "RGB" + else: + self.mode, rawmode = "RGB", "BGR" + elif depth == 32: + if file_type == 3: + self.mode, rawmode = 'RGB', 'RGBX' + else: + self.mode, rawmode = 'RGB', 'BGRX' + else: + raise SyntaxError("Unsupported Mode/Bit Depth") + + if palette_length: + if palette_length > 1024: + raise SyntaxError("Unsupported Color Palette Length") + + if palette_type != 1: + raise SyntaxError("Unsupported Palette Type") + + offset = offset + palette_length + self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length)) + if self.mode == "L": + self.mode = "P" + rawmode = rawmode.replace('L', 'P') + + # 16 bit boundaries on stride + stride = ((self.size[0] * depth + 15) // 16) * 2 + + # file type: Type is the version (or flavor) of the bitmap + # file. The following values are typically found in the Type + # field: + # 0000h Old + # 0001h Standard + # 0002h Byte-encoded + # 0003h RGB format + # 0004h TIFF format + # 0005h IFF format + # FFFFh Experimental + + # Old and standard are the same, except for the length tag. + # byte-encoded is run-length-encoded + # RGB looks similar to standard, but RGB byte order + # TIFF and IFF mean that they were converted from T/IFF + # Experimental means that it's something else. + # (https://www.fileformat.info/format/sunraster/egff.htm) + + if file_type in (0, 1, 3, 4, 5): + self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))] + elif file_type == 2: + self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)] + else: + raise SyntaxError('Unsupported Sun Raster file type') + +# +# registry + + +Image.register_open(SunImageFile.format, SunImageFile, _accept) + +Image.register_extension(SunImageFile.format, ".ras") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c4d690aa125c282870703242751b0ff3c4756bb Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/SunImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.py new file mode 100644 index 0000000000000000000000000000000000000000..0e949ff88ebd642f8f77dfb2ff63d33ee39c93ac --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.py @@ -0,0 +1,56 @@ +# +# The Python Imaging Library. +# $Id$ +# +# read files from within a tar file +# +# History: +# 95-06-18 fl Created +# 96-05-28 fl Open files in binary mode +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-96. +# +# See the README file for information on usage and redistribution. +# + +from . import ContainerIO + + +## +# A file object that provides read access to a given member of a TAR +# file. + +class TarIO(ContainerIO.ContainerIO): + + def __init__(self, tarfile, file): + """ + Create file object. + + :param tarfile: Name of TAR file. + :param file: Name of member file. 
+ """ + fh = open(tarfile, "rb") + + while True: + + s = fh.read(512) + if len(s) != 512: + raise IOError("unexpected end of tar file") + + name = s[:100].decode('utf-8') + i = name.find('\0') + if i == 0: + raise IOError("cannot find subfile") + if i > 0: + name = name[:i] + + size = int(s[124:135], 8) + + if file == name: + break + + fh.seek((size + 511) & (~511), 1) + + # Open region + ContainerIO.ContainerIO.__init__(self, fh, fh.tell(), size) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abbb77fddc5d1cb4b27ac73bd4464276b62da87f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TarIO.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..57b6ae2c85b0ed34459189458eae4784b7568723 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.py @@ -0,0 +1,209 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TGA file handling +# +# History: +# 95-09-01 fl created (reads 24-bit files only) +# 97-01-04 fl support more TGA versions, including compressed images +# 98-07-04 fl fixed orientation and alpha layer bugs +# 98-09-11 fl fixed orientation for runlength decoder +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, o8, o16le as o16 + +__version__ = "0.3" + + +# +# -------------------------------------------------------------------- +# Read RGA file + + +MODES = { + # map imagetype/depth to rawmode + (1, 8): "P", + (3, 1): "1", + (3, 8): "L", + (3, 16): "LA", + (2, 16): "BGR;5", + (2, 24): "BGR", + (2, 32): "BGRA", +} + + +## +# Image plugin for Targa files. + +class TgaImageFile(ImageFile.ImageFile): + + format = "TGA" + format_description = "Targa" + + def _open(self): + + # process header + s = self.fp.read(18) + + idlen = i8(s[0]) + + colormaptype = i8(s[1]) + imagetype = i8(s[2]) + + depth = i8(s[16]) + + flags = i8(s[17]) + + self.size = i16(s[12:]), i16(s[14:]) + + # validate header fields + if colormaptype not in (0, 1) or\ + self.size[0] <= 0 or self.size[1] <= 0 or\ + depth not in (1, 8, 16, 24, 32): + raise SyntaxError("not a TGA file") + + # image mode + if imagetype in (3, 11): + self.mode = "L" + if depth == 1: + self.mode = "1" # ??? 
+ elif depth == 16: + self.mode = "LA" + elif imagetype in (1, 9): + self.mode = "P" + elif imagetype in (2, 10): + self.mode = "RGB" + if depth == 32: + self.mode = "RGBA" + else: + raise SyntaxError("unknown TGA mode") + + # orientation + orientation = flags & 0x30 + if orientation == 0x20: + orientation = 1 + elif not orientation: + orientation = -1 + else: + raise SyntaxError("unknown TGA orientation") + + self.info["orientation"] = orientation + + if imagetype & 8: + self.info["compression"] = "tga_rle" + + if idlen: + self.info["id_section"] = self.fp.read(idlen) + + if colormaptype: + # read palette + start, size, mapdepth = i16(s[3:]), i16(s[5:]), i16(s[7:]) + if mapdepth == 16: + self.palette = ImagePalette.raw( + "BGR;16", b"\0"*2*start + self.fp.read(2*size)) + elif mapdepth == 24: + self.palette = ImagePalette.raw( + "BGR", b"\0"*3*start + self.fp.read(3*size)) + elif mapdepth == 32: + self.palette = ImagePalette.raw( + "BGRA", b"\0"*4*start + self.fp.read(4*size)) + + # setup tile descriptor + try: + rawmode = MODES[(imagetype & 7, depth)] + if imagetype & 8: + # compressed + self.tile = [("tga_rle", (0, 0)+self.size, + self.fp.tell(), (rawmode, orientation, depth))] + else: + self.tile = [("raw", (0, 0)+self.size, + self.fp.tell(), (rawmode, 0, orientation))] + except KeyError: + pass # cannot decode + +# +# -------------------------------------------------------------------- +# Write TGA file + + +SAVE = { + "1": ("1", 1, 0, 3), + "L": ("L", 8, 0, 3), + "LA": ("LA", 16, 0, 3), + "P": ("P", 8, 1, 1), + "RGB": ("BGR", 24, 0, 2), + "RGBA": ("BGRA", 32, 0, 2), +} + + +def _save(im, fp, filename): + + try: + rawmode, bits, colormaptype, imagetype = SAVE[im.mode] + except KeyError: + raise IOError("cannot write mode %s as TGA" % im.mode) + + rle = im.encoderinfo.get("rle", False) + + if rle: + imagetype += 8 + + if colormaptype: + colormapfirst, colormaplength, colormapentry = 0, 256, 24 + else: + colormapfirst, colormaplength, colormapentry = 0, 0, 0 + + if im.mode in ("LA", "RGBA"): + flags = 8 + else: + flags = 0 + + orientation = im.info.get("orientation", -1) + if orientation > 0: + flags = flags | 0x20 + + fp.write(b"\000" + + o8(colormaptype) + + o8(imagetype) + + o16(colormapfirst) + + o16(colormaplength) + + o8(colormapentry) + + o16(0) + + o16(0) + + o16(im.size[0]) + + o16(im.size[1]) + + o8(bits) + + o8(flags)) + + if colormaptype: + fp.write(im.im.getpalette("RGB", "BGR")) + + if rle: + ImageFile._save( + im, + fp, + [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))]) + else: + ImageFile._save( + im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))]) + + # write targa version 2 footer + fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." 
+ b"\000") + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(TgaImageFile.format, TgaImageFile) +Image.register_save(TgaImageFile.format, _save) + +Image.register_extension(TgaImageFile.format, ".tga") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46f3ec9931f82c63c90af65ddbf489393241bc6c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TgaImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..6f032f49d19ab1f931e52e3947b8dd78279aa6e5 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.py @@ -0,0 +1,1838 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF file handling +# +# TIFF is a flexible, if somewhat aged, image file format originally +# defined by Aldus. Although TIFF supports a wide variety of pixel +# layouts and compression methods, the name doesn't really stand for +# "thousands of incompatible file formats," it just feels that way. +# +# To read TIFF data from a stream, the stream must be seekable. For +# progressive decoding, make sure to use TIFF files where the tag +# directory is placed first in the file. +# +# History: +# 1995-09-01 fl Created +# 1996-05-04 fl Handle JPEGTABLES tag +# 1996-05-18 fl Fixed COLORMAP support +# 1997-01-05 fl Fixed PREDICTOR support +# 1997-08-27 fl Added support for rational tags (from Perry Stoll) +# 1998-01-10 fl Fixed seek/tell (from Jan Blom) +# 1998-07-15 fl Use private names for internal variables +# 1999-06-13 fl Rewritten for PIL 1.0 (1.0) +# 2000-10-11 fl Additional fixes for Python 2.0 (1.1) +# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2) +# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3) +# 2001-12-18 fl Added workaround for broken Matrox library +# 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart) +# 2003-05-19 fl Check FILLORDER tag +# 2003-09-26 fl Added RGBa support +# 2004-02-24 fl Added DPI support; fixed rational write support +# 2005-02-07 fl Added workaround for broken Corel Draw 10 files +# 2006-01-09 fl Added support for float/double tags (from Russell Nelson) +# +# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from __future__ import division, print_function + +from . import Image, ImageFile, ImagePalette, TiffTags +from ._binary import i8, o8 +from ._util import py3 + +import collections +from fractions import Fraction +from numbers import Number, Rational + +import io +import itertools +import os +import struct +import sys +import warnings + +from .TiffTags import TYPES + +try: + # Python 3 + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import MutableMapping + + +__version__ = "1.3.5" +DEBUG = False # Needs to be merged with the new logging approach. + +# Set these to true to force use of libtiff for reading or writing. 
+READ_LIBTIFF = False +WRITE_LIBTIFF = False +IFD_LEGACY_API = True + +II = b"II" # little-endian (Intel style) +MM = b"MM" # big-endian (Motorola style) + +# +# -------------------------------------------------------------------- +# Read TIFF files + +# a few tag names, just to make the code below a bit more readable +IMAGEWIDTH = 256 +IMAGELENGTH = 257 +BITSPERSAMPLE = 258 +COMPRESSION = 259 +PHOTOMETRIC_INTERPRETATION = 262 +FILLORDER = 266 +IMAGEDESCRIPTION = 270 +STRIPOFFSETS = 273 +SAMPLESPERPIXEL = 277 +ROWSPERSTRIP = 278 +STRIPBYTECOUNTS = 279 +X_RESOLUTION = 282 +Y_RESOLUTION = 283 +PLANAR_CONFIGURATION = 284 +RESOLUTION_UNIT = 296 +SOFTWARE = 305 +DATE_TIME = 306 +ARTIST = 315 +PREDICTOR = 317 +COLORMAP = 320 +TILEOFFSETS = 324 +EXTRASAMPLES = 338 +SAMPLEFORMAT = 339 +JPEGTABLES = 347 +COPYRIGHT = 33432 +IPTC_NAA_CHUNK = 33723 # newsphoto properties +PHOTOSHOP_CHUNK = 34377 # photoshop properties +ICCPROFILE = 34675 +EXIFIFD = 34665 +XMP = 700 + +# https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java +IMAGEJ_META_DATA_BYTE_COUNTS = 50838 +IMAGEJ_META_DATA = 50839 + +COMPRESSION_INFO = { + # Compression => pil compression name + 1: "raw", + 2: "tiff_ccitt", + 3: "group3", + 4: "group4", + 5: "tiff_lzw", + 6: "tiff_jpeg", # obsolete + 7: "jpeg", + 8: "tiff_adobe_deflate", + 32771: "tiff_raw_16", # 16-bit padding + 32773: "packbits", + 32809: "tiff_thunderscan", + 32946: "tiff_deflate", + 34676: "tiff_sgilog", + 34677: "tiff_sgilog24", +} + +COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} + +OPEN_INFO = { + # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample, + # ExtraSamples) => mode, rawmode + (II, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (II, 1, (1,), 1, (1,), ()): ("1", "1"), + (MM, 1, (1,), 1, (1,), ()): ("1", "1"), + (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), + + (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (II, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + + (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (II, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + + (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (MM, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (II, 1, (1,), 1, (8,), ()): ("L", "L"), + (MM, 1, (1,), 1, (8,), ()): ("L", "L"), + (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), + + (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), + + (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), + (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), + (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), + (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), + + (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), + (II, 1, (2,), 1, (32,), ()): ("I", 
"I;32S"), + (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), + (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), + + (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + + (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + + (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), + (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), + + (II, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (II, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (II, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (II, 3, (1,), 1, (8,), ()): ("P", "P"), + (MM, 3, (1,), 1, (8,), ()): ("P", "P"), + (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (II, 3, (1,), 2, (8,), ()): ("P", "P;R"), + (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"), + + (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + + (II, 6, (1,), 1, (8, 8, 8), ()): ("YCbCr", "YCbCr"), + (MM, 6, (1,), 1, (8, 8, 8), ()): ("YCbCr", "YCbCr"), + (II, 6, (1,), 1, (8, 8, 8, 8), (0,)): ("YCbCr", "YCbCrX"), + (MM, 6, (1,), 1, (8, 8, 8, 8), (0,)): ("YCbCr", "YCbCrX"), + (II, 6, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("YCbCr", "YCbCrXXX"), + (MM, 6, (1,), 1, (8, 8, 8, 8, 8), (0, 
0)): ("YCbCr", "YCbCrXXX"), + (II, 6, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("YCbCr", "YCbCrXXX"), + (MM, 6, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("YCbCr", "YCbCrXXX"), + + (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), + (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), +} + +PREFIXES = [ + b"MM\x00\x2A", # Valid TIFF header with big-endian byte order + b"II\x2A\x00", # Valid TIFF header with little-endian byte order + b"MM\x2A\x00", # Invalid TIFF header, assume big-endian + b"II\x00\x2A", # Invalid TIFF header, assume little-endian +] + + +def _accept(prefix): + return prefix[:4] in PREFIXES + + +def _limit_rational(val, max_val): + inv = abs(val) > 1 + n_d = IFDRational(1 / val if inv else val).limit_rational(max_val) + return n_d[::-1] if inv else n_d + + +## +# Wrapper for TIFF IFDs. + +_load_dispatch = {} +_write_dispatch = {} + + +class IFDRational(Rational): + """ Implements a rational class where 0/0 is a legal value to match + the in the wild use of exif rationals. + + e.g., DigitalZoomRatio - 0.00/0.00 indicates that no digital zoom was used + """ + + """ If the denominator is 0, store this as a float('nan'), otherwise store + as a fractions.Fraction(). Delegate as appropriate + + """ + + __slots__ = ('_numerator', '_denominator', '_val') + + def __init__(self, value, denominator=1): + """ + :param value: either an integer numerator, a + float/rational/other number, or an IFDRational + :param denominator: Optional integer denominator + """ + self._denominator = denominator + self._numerator = value + self._val = float(1) + + if isinstance(value, Fraction): + self._numerator = value.numerator + self._denominator = value.denominator + self._val = value + + if isinstance(value, IFDRational): + self._denominator = value.denominator + self._numerator = value.numerator + self._val = value._val + return + + if denominator == 0: + self._val = float('nan') + return + + elif denominator == 1: + self._val = Fraction(value) + else: + self._val = Fraction(value, denominator) + + @property + def numerator(a): + return a._numerator + + @property + def denominator(a): + return a._denominator + + def limit_rational(self, max_denominator): + """ + + :param max_denominator: Integer, the maximum denominator value + :returns: Tuple of (numerator, denominator) + """ + + if self.denominator == 0: + return (self.numerator, self.denominator) + + f = self._val.limit_denominator(max_denominator) + return (f.numerator, f.denominator) + + def __repr__(self): + return str(float(self._val)) + + def __hash__(self): + return self._val.__hash__() + + def __eq__(self, other): + return self._val == other + + def _delegate(op): + def delegate(self, *args): + return getattr(self._val, op)(*args) + return delegate + + """ a = ['add','radd', 'sub', 'rsub','div', 'rdiv', 'mul', 'rmul', + 'truediv', 'rtruediv', 'floordiv', + 'rfloordiv','mod','rmod', 'pow','rpow', 'pos', 'neg', + 'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'nonzero', + 'ceil', 'floor', 'round'] + print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) + """ + + __add__ = _delegate('__add__') + __radd__ = _delegate('__radd__') + __sub__ = _delegate('__sub__') + __rsub__ = _delegate('__rsub__') + __div__ = _delegate('__div__') + __rdiv__ = _delegate('__rdiv__') + __mul__ = _delegate('__mul__') + __rmul__ = _delegate('__rmul__') + __truediv__ = _delegate('__truediv__') + __rtruediv__ = _delegate('__rtruediv__') + __floordiv__ = _delegate('__floordiv__') + __rfloordiv__ = _delegate('__rfloordiv__') + __mod__ = _delegate('__mod__') + 
__rmod__ = _delegate('__rmod__') + __pow__ = _delegate('__pow__') + __rpow__ = _delegate('__rpow__') + __pos__ = _delegate('__pos__') + __neg__ = _delegate('__neg__') + __abs__ = _delegate('__abs__') + __trunc__ = _delegate('__trunc__') + __lt__ = _delegate('__lt__') + __gt__ = _delegate('__gt__') + __le__ = _delegate('__le__') + __ge__ = _delegate('__ge__') + __nonzero__ = _delegate('__nonzero__') + __ceil__ = _delegate('__ceil__') + __floor__ = _delegate('__floor__') + __round__ = _delegate('__round__') + + +class ImageFileDirectory_v2(MutableMapping): + """This class represents a TIFF tag directory. To speed things up, we + don't decode tags unless they're asked for. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v2() + ifd[key] = 'Some Data' + ifd.tagtype[key] = 2 + print(ifd[key]) + 'Some Data' + + Individual values are returned as the strings or numbers, sequences are + returned as tuples of the values. + + The tiff metadata type of each item is stored in a dictionary of + tag types in + `~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. The types + are read from a tiff file, guessed from the type added, or added + manually. + + Data Structures: + + * self.tagtype = {} + + * Key: numerical tiff tag number + * Value: integer corresponding to the data type from `~PIL.TiffTags.TYPES` + + .. versionadded:: 3.0.0 + """ + """ + Documentation: + + 'internal' data structures: + * self._tags_v2 = {} Key: numerical tiff tag number + Value: decoded data, as tuple for multiple values + * self._tagdata = {} Key: numerical tiff tag number + Value: undecoded byte string from file + * self._tags_v1 = {} Key: numerical tiff tag number + Value: decoded data in the v1 format + + Tags will be found in the private attributes self._tagdata, and in + self._tags_v2 once decoded. + + Self.legacy_api is a value for internal use, and shouldn't be + changed from outside code. In cooperation with the + ImageFileDirectory_v1 class, if legacy_api is true, then decoded + tags will be populated into both _tags_v1 and _tags_v2. _Tags_v2 + will be used if this IFD is used in the TIFF save routine. Tags + should be read from tags_v1 if legacy_api == true. + + """ + + def __init__(self, ifh=b"II\052\0\0\0\0\0", prefix=None): + """Initialize an ImageFileDirectory. + + To construct an ImageFileDirectory from a real file, pass the 8-byte + magic header to the constructor. To only set the endianness, pass it + as the 'prefix' keyword argument. + + :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets + endianness. + :param prefix: Override the endianness of the file. 
+ """ + if ifh[:4] not in PREFIXES: + raise SyntaxError("not a TIFF file (header %r not valid)" % ifh) + self._prefix = prefix if prefix is not None else ifh[:2] + if self._prefix == MM: + self._endian = ">" + elif self._prefix == II: + self._endian = "<" + else: + raise SyntaxError("not a TIFF IFD") + self.reset() + self.next, = self._unpack("L", ifh[4:]) + self._legacy_api = False + + prefix = property(lambda self: self._prefix) + offset = property(lambda self: self._offset) + legacy_api = property(lambda self: self._legacy_api) + + @legacy_api.setter + def legacy_api(self, value): + raise Exception("Not allowing setting of legacy api") + + def reset(self): + self._tags_v1 = {} # will remain empty if legacy_api is false + self._tags_v2 = {} # main tag storage + self._tagdata = {} + self.tagtype = {} # added 2008-06-05 by Florian Hoech + self._next = None + self._offset = None + + def __str__(self): + return str(dict(self)) + + def named(self): + """ + :returns: dict of name|key: value + + Returns the complete tag dictionary, with named tags where possible. + """ + return dict((TiffTags.lookup(code).name, value) + for code, value in self.items()) + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v2)) + + def __getitem__(self, tag): + if tag not in self._tags_v2: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + self[tag] = handler(self, data, self.legacy_api) # check type + val = self._tags_v2[tag] + if self.legacy_api and not isinstance(val, (tuple, bytes)): + val = val, + return val + + def __contains__(self, tag): + return tag in self._tags_v2 or tag in self._tagdata + + if not py3: + def has_key(self, tag): + return tag in self + + def __setitem__(self, tag, value): + self._setitem(tag, value, self.legacy_api) + + def _setitem(self, tag, value, legacy_api): + basetypes = (Number, bytes, str) + if not py3: + basetypes += unicode, + + info = TiffTags.lookup(tag) + values = [value] if isinstance(value, basetypes) else value + + if tag not in self.tagtype: + if info.type: + self.tagtype[tag] = info.type + else: + self.tagtype[tag] = 7 + if all(isinstance(v, IFDRational) for v in values): + self.tagtype[tag] = 5 + elif all(isinstance(v, int) for v in values): + if all(v < 2 ** 16 for v in values): + self.tagtype[tag] = 3 + else: + self.tagtype[tag] = 4 + elif all(isinstance(v, float) for v in values): + self.tagtype[tag] = 12 + else: + if py3: + if all(isinstance(v, str) for v in values): + self.tagtype[tag] = 2 + else: + # Never treat data as binary by default on Python 2. + self.tagtype[tag] = 2 + + if self.tagtype[tag] == 7 and py3: + values = [value.encode("ascii", 'replace') if isinstance( + value, str) else value] + + values = tuple(info.cvt_enum(value) for value in values) + + dest = self._tags_v1 if legacy_api else self._tags_v2 + + # Three branches: + # Spec'd length == 1, Actual length 1, store as element + # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. + # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. + # Don't mess with the legacy api, since it's frozen. + if ((info.length == 1) or + (info.length is None and len(values) == 1 and not legacy_api)): + # Don't mess with the legacy api, since it's frozen. 
+ if legacy_api and self.tagtype[tag] in [5, 10]: # rationals + values = values, + try: + dest[tag], = values + except ValueError: + # We've got a builtin tag with 1 expected entry + warnings.warn( + "Metadata Warning, tag %s had too many entries: %s, expected 1" % ( + tag, len(values))) + dest[tag] = values[0] + + else: + # Spec'd length > 1 or undefined + # Unspec'd, and length > 1 + dest[tag] = values + + def __delitem__(self, tag): + self._tags_v2.pop(tag, None) + self._tags_v1.pop(tag, None) + self._tagdata.pop(tag, None) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v2)) + + def _unpack(self, fmt, data): + return struct.unpack(self._endian + fmt, data) + + def _pack(self, fmt, *values): + return struct.pack(self._endian + fmt, *values) + + def _register_loader(idx, size): + def decorator(func): + from .TiffTags import TYPES + if func.__name__.startswith("load_"): + TYPES[idx] = func.__name__[5:].replace("_", " ") + _load_dispatch[idx] = size, func + return func + return decorator + + def _register_writer(idx): + def decorator(func): + _write_dispatch[idx] = func + return func + return decorator + + def _register_basic(idx_fmt_name): + from .TiffTags import TYPES + idx, fmt, name = idx_fmt_name + TYPES[idx] = name + size = struct.calcsize("=" + fmt) + _load_dispatch[idx] = size, lambda self, data, legacy_api=True: ( + self._unpack("{}{}".format(len(data) // size, fmt), data)) + _write_dispatch[idx] = lambda self, *values: ( + b"".join(self._pack(fmt, value) for value in values)) + + list(map(_register_basic, + [(3, "H", "short"), + (4, "L", "long"), + (6, "b", "signed byte"), + (8, "h", "signed short"), + (9, "l", "signed long"), + (11, "f", "float"), + (12, "d", "double")])) + + @_register_loader(1, 1) # Basic type, except for the legacy API. + def load_byte(self, data, legacy_api=True): + return data + + @_register_writer(1) # Basic type, except for the legacy API. 
+ def write_byte(self, data): + return data + + @_register_loader(2, 1) + def load_string(self, data, legacy_api=True): + if data.endswith(b"\0"): + data = data[:-1] + return data.decode("latin-1", "replace") + + @_register_writer(2) + def write_string(self, value): + # remerge of https://github.com/python-pillow/Pillow/pull/1416 + if sys.version_info.major == 2: + value = value.decode('ascii', 'replace') + return b"" + value.encode('ascii', 'replace') + b"\0" + + @_register_loader(5, 8) + def load_rational(self, data, legacy_api=True): + vals = self._unpack("{}L".format(len(data) // 4), data) + + def combine(a, b): return (a, b) if legacy_api else IFDRational(a, b) + return tuple(combine(num, denom) + for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(5) + def write_rational(self, *values): + return b"".join(self._pack("2L", *_limit_rational(frac, 2 ** 31)) + for frac in values) + + @_register_loader(7, 1) + def load_undefined(self, data, legacy_api=True): + return data + + @_register_writer(7) + def write_undefined(self, value): + return value + + @_register_loader(10, 8) + def load_signed_rational(self, data, legacy_api=True): + vals = self._unpack("{}l".format(len(data) // 4), data) + + def combine(a, b): return (a, b) if legacy_api else IFDRational(a, b) + return tuple(combine(num, denom) + for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(10) + def write_signed_rational(self, *values): + return b"".join(self._pack("2L", *_limit_rational(frac, 2 ** 30)) + for frac in values) + + def _ensure_read(self, fp, size): + ret = fp.read(size) + if len(ret) != size: + raise IOError("Corrupt EXIF data. " + + "Expecting to read %d bytes but only got %d. " % + (size, len(ret))) + return ret + + def load(self, fp): + + self.reset() + self._offset = fp.tell() + + try: + for i in range(self._unpack("H", self._ensure_read(fp, 2))[0]): + tag, typ, count, data = self._unpack("HHL4s", + self._ensure_read(fp, 12)) + if DEBUG: + tagname = TiffTags.lookup(tag).name + typname = TYPES.get(typ, "unknown") + print("tag: %s (%d) - type: %s (%d)" % + (tagname, tag, typname, typ), end=" ") + + try: + unit_size, handler = self._load_dispatch[typ] + except KeyError: + if DEBUG: + print("- unsupported type", typ) + continue # ignore unsupported type + size = count * unit_size + if size > 4: + here = fp.tell() + offset, = self._unpack("L", data) + if DEBUG: + print("Tag Location: %s - Data Location: %s" % + (here, offset), end=" ") + fp.seek(offset) + data = ImageFile._safe_read(fp, size) + fp.seek(here) + else: + data = data[:size] + + if len(data) != size: + warnings.warn("Possibly corrupt EXIF data. " + "Expecting to read %d bytes but only got %d." + " Skipping tag %s" % (size, len(data), tag)) + continue + + if not data: + continue + + self._tagdata[tag] = data + self.tagtype[tag] = typ + + if DEBUG: + if size > 32: + print("- value: " % size) + else: + print("- value:", self[tag]) + + self.next, = self._unpack("L", self._ensure_read(fp, 4)) + except IOError as msg: + warnings.warn(str(msg)) + return + + def save(self, fp): + + if fp.tell() == 0: # skip TIFF header on subsequent pages + # tiff header -- PIL always starts the first IFD at offset 8 + fp.write(self._prefix + self._pack("HL", 42, 8)) + + # FIXME What about tagdata? 
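# Layout note for the writer below: the IFD starts with a 2-byte entry count, followed
# by one 12-byte entry per tag (tag: H, type: H, count: L, value-or-offset: 4s) and a
# 4-byte next-IFD offset (written as zero here); values longer than 4 bytes are emitted
# after the entry table and referenced by offset, which is why `offset` is precomputed
# as tell() + 12 * number_of_tags + 4.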
+ fp.write(self._pack("H", len(self._tags_v2))) + + entries = [] + offset = fp.tell() + len(self._tags_v2) * 12 + 4 + stripoffsets = None + + # pass 1: convert tags to binary format + # always write tags in ascending order + for tag, value in sorted(self._tags_v2.items()): + if tag == STRIPOFFSETS: + stripoffsets = len(entries) + typ = self.tagtype.get(tag) + if DEBUG: + print("Tag %s, Type: %s, Value: %s" % (tag, typ, value)) + values = value if isinstance(value, tuple) else (value,) + data = self._write_dispatch[typ](self, *values) + if DEBUG: + tagname = TiffTags.lookup(tag).name + typname = TYPES.get(typ, "unknown") + print("save: %s (%d) - type: %s (%d)" % + (tagname, tag, typname, typ), end=" ") + if len(data) >= 16: + print("- value: " % len(data)) + else: + print("- value:", values) + + # count is sum of lengths for string and arbitrary data + count = len(data) if typ in [2, 7] else len(values) + # figure out if data fits into the entry + if len(data) <= 4: + entries.append((tag, typ, count, data.ljust(4, b"\0"), b"")) + else: + entries.append((tag, typ, count, self._pack("L", offset), + data)) + offset += (len(data) + 1) // 2 * 2 # pad to word + + # update strip offset data to point beyond auxiliary data + if stripoffsets is not None: + tag, typ, count, value, data = entries[stripoffsets] + if data: + raise NotImplementedError( + "multistrip support not yet implemented") + value = self._pack("L", self._unpack("L", value)[0] + offset) + entries[stripoffsets] = tag, typ, count, value, data + + # pass 2: write entries to file + for tag, typ, count, value, data in entries: + if DEBUG > 1: + print(tag, typ, count, repr(value), repr(data)) + fp.write(self._pack("HHL4s", tag, typ, count, value)) + + # -- overwrite here for multi-page -- + fp.write(b"\0\0\0\0") # end of entries + + # pass 3: write auxiliary data to file + for tag, typ, count, value, data in entries: + fp.write(data) + if len(data) & 1: + fp.write(b"\0") + + return offset + + +ImageFileDirectory_v2._load_dispatch = _load_dispatch +ImageFileDirectory_v2._write_dispatch = _write_dispatch +for idx, name in TYPES.items(): + name = name.replace(" ", "_") + setattr(ImageFileDirectory_v2, "load_" + name, _load_dispatch[idx][1]) + setattr(ImageFileDirectory_v2, "write_" + name, _write_dispatch[idx]) +del _load_dispatch, _write_dispatch, idx, name + + +# Legacy ImageFileDirectory support. +class ImageFileDirectory_v1(ImageFileDirectory_v2): + """This class represents the **legacy** interface to a TIFF tag directory. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v1() + ifd[key] = 'Some Data' + ifd.tagtype[key] = 2 + print(ifd[key]) + ('Some Data',) + + Also contains a dictionary of tag types as read from the tiff image file, + `~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`. + + Values are returned as a tuple. + + .. deprecated:: 3.0.0 + """ + def __init__(self, *args, **kwargs): + ImageFileDirectory_v2.__init__(self, *args, **kwargs) + self._legacy_api = True + + tags = property(lambda self: self._tags_v1) + tagdata = property(lambda self: self._tagdata) + + @classmethod + def from_v2(cls, original): + """ Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance. 
+ + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + + """ + + ifd = cls(prefix=original.prefix) + ifd._tagdata = original._tagdata + ifd.tagtype = original.tagtype + ifd.next = original.next # an indicator for multipage tiffs + return ifd + + def to_v2(self): + """ Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance. + + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + + """ + + ifd = ImageFileDirectory_v2(prefix=self.prefix) + ifd._tagdata = dict(self._tagdata) + ifd.tagtype = dict(self.tagtype) + ifd._tags_v2 = dict(self._tags_v2) + return ifd + + def __contains__(self, tag): + return tag in self._tags_v1 or tag in self._tagdata + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v1)) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v1)) + + def __setitem__(self, tag, value): + for legacy_api in (False, True): + self._setitem(tag, value, legacy_api) + + def __getitem__(self, tag): + if tag not in self._tags_v1: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + for legacy in (False, True): + self._setitem(tag, handler(self, data, legacy), legacy) + val = self._tags_v1[tag] + if not isinstance(val, (tuple, bytes)): + val = val, + return val + + +# undone -- switch this pointer when IFD_LEGACY_API == False +ImageFileDirectory = ImageFileDirectory_v1 + + +## +# Image plugin for TIFF files. + +class TiffImageFile(ImageFile.ImageFile): + + format = "TIFF" + format_description = "Adobe TIFF" + _close_exclusive_fp_after_loading = False + + def _open(self): + "Open the first image in a TIFF file" + + # Header + ifh = self.fp.read(8) + + # image file directory (tag dictionary) + self.tag_v2 = ImageFileDirectory_v2(ifh) + + # legacy tag/ifd entries will be filled in later + self.tag = self.ifd = None + + # setup frame pointers + self.__first = self.__next = self.tag_v2.next + self.__frame = -1 + self.__fp = self.fp + self._frame_pos = [] + self._n_frames = None + self._is_animated = None + + if DEBUG: + print("*** TiffImageFile._open ***") + print("- __first:", self.__first) + print("- ifh: ", ifh) + + # and load the first frame + self._seek(0) + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + try: + while True: + self._seek(self.tell() + 1) + except EOFError: + self._n_frames = self.tell() + 1 + self.seek(current) + return self._n_frames + + @property + def is_animated(self): + if self._is_animated is None: + if self._n_frames is not None: + self._is_animated = self._n_frames != 1 + else: + current = self.tell() + + try: + self.seek(1) + self._is_animated = True + except EOFError: + self._is_animated = False + + self.seek(current) + return self._is_animated + + def seek(self, frame): + "Select a given frame as current image" + if not self._seek_check(frame): + return + self._seek(frame) + # Create a new core image object on second and + # subsequent frames in the image. Image may be + # different size/mode. 
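# A minimal frame-navigation sketch (hypothetical filename), using the n_frames
# property and seek()/tell() defined in this class for multi-page TIFFs:
#
#     im = Image.open("multipage.tif")
#     for frame in range(im.n_frames):
#         im.seek(frame)
#         im.load()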
+ Image._decompression_bomb_check(self.size) + self.im = Image.core.new(self.mode, self.size) + + def _seek(self, frame): + self.fp = self.__fp + while len(self._frame_pos) <= frame: + if not self.__next: + raise EOFError("no more images in TIFF file") + if DEBUG: + print("Seeking to frame %s, on frame %s, " + "__next %s, location: %s" % + (frame, self.__frame, self.__next, self.fp.tell())) + # reset python3 buffered io handle in case fp + # was passed to libtiff, invalidating the buffer + self.fp.tell() + self.fp.seek(self.__next) + self._frame_pos.append(self.__next) + if DEBUG: + print("Loading tags, location: %s" % self.fp.tell()) + self.tag_v2.load(self.fp) + self.__next = self.tag_v2.next + self.__frame += 1 + self.fp.seek(self._frame_pos[frame]) + self.tag_v2.load(self.fp) + self.__next = self.tag_v2.next + # fill the legacy tag/ifd entries + self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) + self.__frame = frame + self._setup() + + def tell(self): + "Return the current frame number" + return self.__frame + + def _decoder(self, rawmode, layer, tile=None): + "Setup decoder contexts" + + args = None + if rawmode == "RGB" and self._planar_configuration == 2: + rawmode = rawmode[layer] + compression = self._compression + if compression == "raw": + args = (rawmode, 0, 1) + elif compression == "packbits": + args = rawmode + + return args + + def load(self): + if self.use_load_libtiff: + return self._load_libtiff() + return super(TiffImageFile, self).load() + + def load_end(self): + # allow closing if we're on the first frame, there's no next + # This is the ImageFile.load path only, libtiff specific below. + if self.__frame == 0 and not self.__next: + self._close_exclusive_fp_after_loading = True + + def _load_libtiff(self): + """ Overload method triggered when we detect a compressed tiff + Calls out to libtiff """ + + pixel = Image.Image.load(self) + + if self.tile is None: + raise IOError("cannot load this image") + if not self.tile: + return pixel + + self.load_prepare() + + if not len(self.tile) == 1: + raise IOError("Not exactly one tile") + + # (self._compression, (extents tuple), + # 0, (rawmode, self._compression, fp)) + extents = self.tile[0][1] + args = list(self.tile[0][3]) + [self.tag_v2.offset] + + # To be nice on memory footprint, if there's a + # file descriptor, use that instead of reading + # into a string in python. + # libtiff closes the file descriptor, so pass in a dup. + try: + fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno()) + # flush the file descriptor, prevents error on pypy 2.4+ + # should also eliminate the need for fp.tell for py3 + # in _seek + if hasattr(self.fp, "flush"): + self.fp.flush() + except IOError: + # io.BytesIO have a fileno, but returns an IOError if + # it doesn't use a file descriptor. + fp = False + + if fp: + args[2] = fp + + decoder = Image._getdecoder(self.mode, 'libtiff', tuple(args), + self.decoderconfig) + try: + decoder.setimage(self.im, extents) + except ValueError: + raise IOError("Couldn't set the image") + + if hasattr(self.fp, "getvalue"): + # We've got a stringio like thing passed in. Yay for all in memory. + # The decoder needs the entire file in one shot, so there's not + # a lot we can do here other than give it the entire file. + # unless we could do something like get the address of the + # underlying string for stringio. + # + # Rearranging for supporting byteio items, since they have a fileno + # that returns an IOError if there's no underlying fp. Easier to + # deal with here by reordering. 
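The comments above describe the file-descriptor probe used before handing data to libtiff: duplicate a real OS descriptor when one exists, flush first, and fall back to reading the whole stream when fileno() is unusable (as with io.BytesIO). A minimal sketch of that pattern, not the plugin's exact code:

import io
import os

def probe_descriptor(fp):
    try:
        fd = os.dup(fp.fileno()) if hasattr(fp, "fileno") else False
        if hasattr(fp, "flush"):
            fp.flush()              # avoid stale buffered data
    except (IOError, io.UnsupportedOperation):
        fd = False                  # e.g. BytesIO: fileno() exists but raises
    return fd

with open(__file__, "rb") as real_file:
    print(probe_descriptor(real_file))      # an integer descriptor
print(probe_descriptor(io.BytesIO(b"")))    # False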
+ if DEBUG: + print("have getvalue. just sending in a string from getvalue") + n, err = decoder.decode(self.fp.getvalue()) + elif hasattr(self.fp, "fileno"): + # we've got a actual file on disk, pass in the fp. + if DEBUG: + print("have fileno, calling fileno version of the decoder.") + self.fp.seek(0) + # 4 bytes, otherwise the trace might error out + n, err = decoder.decode(b"fpfp") + else: + # we have something else. + if DEBUG: + print("don't have fileno or getvalue. just reading") + # UNDONE -- so much for that buffer size thing. + n, err = decoder.decode(self.fp.read()) + + self.tile = [] + self.readonly = 0 + # libtiff closed the fp in a, we need to close self.fp, if possible + if self._exclusive_fp: + if self.__frame == 0 and not self.__next: + self.fp.close() + self.fp = None # might be shared + + if err < 0: + raise IOError(err) + + return Image.Image.load(self) + + def _setup(self): + "Setup this image object based on current tags" + + if 0xBC01 in self.tag_v2: + raise IOError("Windows Media Photo files not yet supported") + + # extract relevant tags + self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)] + self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1) + + # photometric is a required tag, but not everyone is reading + # the specification + photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0) + + fillorder = self.tag_v2.get(FILLORDER, 1) + + if DEBUG: + print("*** Summary ***") + print("- compression:", self._compression) + print("- photometric_interpretation:", photo) + print("- planar_configuration:", self._planar_configuration) + print("- fill_order:", fillorder) + + # size + xsize = self.tag_v2.get(IMAGEWIDTH) + ysize = self.tag_v2.get(IMAGELENGTH) + self.size = xsize, ysize + + if DEBUG: + print("- size:", self.size) + + sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,)) + if (len(sampleFormat) > 1 + and max(sampleFormat) == min(sampleFormat) == 1): + # SAMPLEFORMAT is properly per band, so an RGB image will + # be (1,1,1). But, we don't support per band pixel types, + # and anything more than one band is a uint8. So, just + # take the first element. Revisit this if adding support + # for more exotic images. + sampleFormat = (1,) + + bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,)) + extra_tuple = self.tag_v2.get(EXTRASAMPLES, ()) + if photo in (2, 6, 8): # RGB, YCbCr, LAB + bps_count = 3 + elif photo == 5: # CMYK + bps_count = 4 + else: + bps_count = 1 + bps_count += len(extra_tuple) + # Some files have only one value in bps_tuple, + # while should have more. Fix it + if bps_count > len(bps_tuple) and len(bps_tuple) == 1: + bps_tuple = bps_tuple * bps_count + + # mode: check photometric interpretation and bits per pixel + key = (self.tag_v2.prefix, photo, sampleFormat, fillorder, + bps_tuple, extra_tuple) + if DEBUG: + print("format key:", key) + try: + self.mode, rawmode = OPEN_INFO[key] + except KeyError: + if DEBUG: + print("- unsupported format") + raise SyntaxError("unknown pixel mode") + + if DEBUG: + print("- raw mode:", rawmode) + print("- pil mode:", self.mode) + + self.info["compression"] = self._compression + + xres = self.tag_v2.get(X_RESOLUTION, 1) + yres = self.tag_v2.get(Y_RESOLUTION, 1) + + if xres and yres: + resunit = self.tag_v2.get(RESOLUTION_UNIT) + if resunit == 2: # dots per inch + self.info["dpi"] = xres, yres + elif resunit == 3: # dots per centimeter. 
convert to dpi + self.info["dpi"] = xres * 2.54, yres * 2.54 + elif resunit is None: # used to default to 1, but now 2) + self.info["dpi"] = xres, yres + # For backward compatibility, + # we also preserve the old behavior + self.info["resolution"] = xres, yres + else: # No absolute unit of measurement + self.info["resolution"] = xres, yres + + # build tile descriptors + x = y = l = 0 + self.tile = [] + self.use_load_libtiff = False + if STRIPOFFSETS in self.tag_v2: + # striped image + offsets = self.tag_v2[STRIPOFFSETS] + h = self.tag_v2.get(ROWSPERSTRIP, ysize) + w = self.size[0] + if READ_LIBTIFF or self._compression != 'raw': + # if DEBUG: + # print("Activating g4 compression for whole file") + + # Decoder expects entire file as one tile. + # There's a buffer size limit in load (64k) + # so large g4 images will fail if we use that + # function. + # + # Setup the one tile for the whole image, then + # use the _load_libtiff function. + + self.use_load_libtiff = True + + # libtiff handles the fillmode for us, so 1;IR should + # actually be 1;I. Including the R double reverses the + # bits, so stripes of the image are reversed. See + # https://github.com/python-pillow/Pillow/issues/279 + if fillorder == 2: + key = ( + self.tag_v2.prefix, photo, sampleFormat, 1, + self.tag_v2.get(BITSPERSAMPLE, (1,)), + self.tag_v2.get(EXTRASAMPLES, ()) + ) + if DEBUG: + print("format key:", key) + # this should always work, since all the + # fillorder==2 modes have a corresponding + # fillorder=1 mode + self.mode, rawmode = OPEN_INFO[key] + # libtiff always returns the bytes in native order. + # we're expecting image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. + if rawmode == 'I;16': + rawmode = 'I;16N' + if ';16B' in rawmode: + rawmode = rawmode.replace(';16B', ';16N') + if ';16L' in rawmode: + rawmode = rawmode.replace(';16L', ';16N') + + # Offset in the tile tuple is 0, we go from 0,0 to + # w,h, and we only do this once -- eds + a = (rawmode, self._compression, False) + self.tile.append( + (self._compression, + (0, 0, w, ysize), + 0, a)) + a = None + + else: + for i, offset in enumerate(offsets): + a = self._decoder(rawmode, l, i) + self.tile.append( + (self._compression, + (0, min(y, ysize), w, min(y+h, ysize)), + offset, a)) + if DEBUG: + print("tiles: ", self.tile) + y = y + h + if y >= self.size[1]: + x = y = 0 + l += 1 + a = None + elif TILEOFFSETS in self.tag_v2: + # tiled image + w = self.tag_v2.get(322) + h = self.tag_v2.get(323) + a = None + for o in self.tag_v2[TILEOFFSETS]: + if not a: + a = self._decoder(rawmode, l) + # FIXME: this doesn't work if the image size + # is not a multiple of the tile size... + self.tile.append( + (self._compression, + (x, y, x+w, y+h), + o, a)) + x = x + w + if x >= self.size[0]: + x, y = 0, y + h + if y >= self.size[1]: + x = y = 0 + l += 1 + a = None + else: + if DEBUG: + print("- unsupported data organization") + raise SyntaxError("unknown data organization") + + # Fix up info. 
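The resolution handling above maps the TIFF ResolutionUnit tag onto the dpi / resolution info keys (1 = no absolute unit, 2 = inch, 3 = centimeter). A small stand-alone sketch of the same conversion, with made-up numbers:

def to_dpi(xres, yres, resunit):
    if resunit == 2:                    # dots per inch
        return xres, yres
    if resunit == 3:                    # dots per centimeter -> dpi
        return xres * 2.54, yres * 2.54
    return None                         # no absolute unit; keep "resolution"

print(to_dpi(300, 300, 2))              # (300, 300)
print(to_dpi(118.11, 118.11, 3))        # approximately (300, 300)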
+ if ICCPROFILE in self.tag_v2: + self.info['icc_profile'] = self.tag_v2[ICCPROFILE] + + # fixup palette descriptor + + if self.mode == "P": + palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] + self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) + + +# +# -------------------------------------------------------------------- +# Write TIFF files + +# little endian is default except for image modes with +# explicit big endian byte-order + +SAVE_INFO = { + # mode => rawmode, byteorder, photometrics, + # sampleformat, bitspersample, extra + "1": ("1", II, 1, 1, (1,), None), + "L": ("L", II, 1, 1, (8,), None), + "LA": ("LA", II, 1, 1, (8, 8), 2), + "P": ("P", II, 3, 1, (8,), None), + "PA": ("PA", II, 3, 1, (8, 8), 2), + "I": ("I;32S", II, 1, 2, (32,), None), + "I;16": ("I;16", II, 1, 1, (16,), None), + "I;16S": ("I;16S", II, 1, 2, (16,), None), + "F": ("F;32F", II, 1, 3, (32,), None), + "RGB": ("RGB", II, 2, 1, (8, 8, 8), None), + "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0), + "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2), + "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), + "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), + "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), + + "I;32BS": ("I;32BS", MM, 1, 2, (32,), None), + "I;16B": ("I;16B", MM, 1, 1, (16,), None), + "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), + "F;32BF": ("F;32BF", MM, 1, 3, (32,), None), +} + + +def _save(im, fp, filename): + + try: + rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] + except KeyError: + raise IOError("cannot write mode %s as TIFF" % im.mode) + + ifd = ImageFileDirectory_v2(prefix=prefix) + + compression = im.encoderinfo.get('compression', + im.info.get('compression', 'raw')) + + libtiff = WRITE_LIBTIFF or compression != 'raw' + + # required for color libtiff images + ifd[PLANAR_CONFIGURATION] = getattr(im, '_planar_configuration', 1) + + ifd[IMAGEWIDTH] = im.size[0] + ifd[IMAGELENGTH] = im.size[1] + + # write any arbitrary tags passed in as an ImageFileDirectory + info = im.encoderinfo.get("tiffinfo", {}) + if DEBUG: + print("Tiffinfo Keys: %s" % list(info)) + if isinstance(info, ImageFileDirectory_v1): + info = info.to_v2() + for key in info: + ifd[key] = info.get(key) + try: + ifd.tagtype[key] = info.tagtype[key] + except: + pass # might not be an IFD, Might not have populated type + + # additions written by Greg Couch, gregc@cgl.ucsf.edu + # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com + if hasattr(im, 'tag_v2'): + # preserve tags from original TIFF image file + for key in (RESOLUTION_UNIT, X_RESOLUTION, Y_RESOLUTION, + IPTC_NAA_CHUNK, PHOTOSHOP_CHUNK, XMP): + if key in im.tag_v2: + ifd[key] = im.tag_v2[key] + ifd.tagtype[key] = im.tag_v2.tagtype[key] + + # preserve ICC profile (should also work when saving other formats + # which support profiles as TIFF) -- 2008-06-06 Florian Hoech + if "icc_profile" in im.info: + ifd[ICCPROFILE] = im.info["icc_profile"] + + for key, name in [(IMAGEDESCRIPTION, "description"), + (X_RESOLUTION, "resolution"), + (Y_RESOLUTION, "resolution"), + (X_RESOLUTION, "x_resolution"), + (Y_RESOLUTION, "y_resolution"), + (RESOLUTION_UNIT, "resolution_unit"), + (SOFTWARE, "software"), + (DATE_TIME, "date_time"), + (ARTIST, "artist"), + (COPYRIGHT, "copyright")]: + if name in im.encoderinfo: + ifd[key] = im.encoderinfo[name] + + dpi = im.encoderinfo.get("dpi") + if dpi: + ifd[RESOLUTION_UNIT] = 2 + ifd[X_RESOLUTION] = dpi[0] + ifd[Y_RESOLUTION] = dpi[1] + + if bits != (1,): + ifd[BITSPERSAMPLE] = bits + if len(bits) != 1: + ifd[SAMPLESPERPIXEL] 
= len(bits) + if extra is not None: + ifd[EXTRASAMPLES] = extra + if format != 1: + ifd[SAMPLEFORMAT] = format + + ifd[PHOTOMETRIC_INTERPRETATION] = photo + + if im.mode == "P": + lut = im.im.getpalette("RGB", "RGB;L") + ifd[COLORMAP] = tuple(i8(v) * 256 for v in lut) + # data orientation + stride = len(bits) * ((im.size[0]*bits[0]+7)//8) + ifd[ROWSPERSTRIP] = im.size[1] + ifd[STRIPBYTECOUNTS] = stride * im.size[1] + ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer + # no compression by default: + ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) + + if libtiff: + if DEBUG: + print("Saving using libtiff encoder") + print("Items: %s" % sorted(ifd.items())) + _fp = 0 + if hasattr(fp, "fileno"): + try: + fp.seek(0) + _fp = os.dup(fp.fileno()) + except io.UnsupportedOperation: + pass + + # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library + # based on the data in the strip. + blocklist = [STRIPOFFSETS, STRIPBYTECOUNTS] + atts = {} + # bits per sample is a single short in the tiff directory, not a list. + atts[BITSPERSAMPLE] = bits[0] + # Merge the ones that we have with (optional) more bits from + # the original file, e.g x,y resolution so that we can + # save(load('')) == original file. + legacy_ifd = {} + if hasattr(im, 'tag'): + legacy_ifd = im.tag.to_v2() + for tag, value in itertools.chain(ifd.items(), + getattr(im, 'tag_v2', {}).items(), + legacy_ifd.items()): + # Libtiff can only process certain core items without adding + # them to the custom dictionary. It will segfault if it attempts + # to add a custom tag without the dictionary entry + # + # UNDONE -- add code for the custom dictionary + if tag not in TiffTags.LIBTIFF_CORE: + continue + if tag not in atts and tag not in blocklist: + if isinstance(value, str if py3 else unicode): + atts[tag] = value.encode('ascii', 'replace') + b"\0" + elif isinstance(value, IFDRational): + atts[tag] = float(value) + else: + atts[tag] = value + + if DEBUG: + print("Converted items: %s" % sorted(atts.items())) + + # libtiff always expects the bytes in native order. + # we're storing image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. 
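The save path above pulls dpi, compression and descriptive tags out of im.encoderinfo, that is, the keyword arguments passed to save(). A hedged usage sketch ("out.tif" is a hypothetical path, and a non-raw compression assumes Pillow was built with libtiff):

from PIL import Image

im = Image.new("RGB", (64, 64), "white")
im.save("out.tif", dpi=(300, 300), compression="tiff_deflate",
        software="example-writer", description="hedged example")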
+ if im.mode in ('I;16B', 'I;16'): + rawmode = 'I;16N' + + a = (rawmode, compression, _fp, filename, atts) + # print(im.mode, compression, a, im.encoderconfig) + e = Image._getencoder(im.mode, 'libtiff', a, im.encoderconfig) + e.setimage(im.im, (0, 0)+im.size) + while True: + # undone, change to self.decodermaxblock: + l, s, d = e.encode(16*1024) + if not _fp: + fp.write(d) + if s: + break + if s < 0: + raise IOError("encoder error %d when writing image file" % s) + + else: + offset = ifd.save(fp) + + ImageFile._save(im, fp, [ + ("raw", (0, 0)+im.size, offset, (rawmode, stride, 1)) + ]) + + # -- helper for multi-page save -- + if "_debug_multipage" in im.encoderinfo: + # just to access o32 and o16 (using correct byte order) + im._debug_multipage = ifd + + +class AppendingTiffWriter: + fieldSizes = [ + 0, # None + 1, # byte + 1, # ascii + 2, # short + 4, # long + 8, # rational + 1, # sbyte + 1, # undefined + 2, # sshort + 4, # slong + 8, # srational + 4, # float + 8, # double + ] + + # StripOffsets = 273 + # FreeOffsets = 288 + # TileOffsets = 324 + # JPEGQTables = 519 + # JPEGDCTables = 520 + # JPEGACTables = 521 + Tags = {273, 288, 324, 519, 520, 521} + + def __init__(self, fn, new=False): + if hasattr(fn, 'read'): + self.f = fn + self.close_fp = False + else: + self.name = fn + self.close_fp = True + try: + self.f = io.open(fn, "w+b" if new else "r+b") + except IOError: + self.f = io.open(fn, "w+b") + self.beginning = self.f.tell() + self.setup() + + def setup(self): + # Reset everything. + self.f.seek(self.beginning, os.SEEK_SET) + + self.whereToWriteNewIFDOffset = None + self.offsetOfNewPage = 0 + + self.IIMM = IIMM = self.f.read(4) + if not IIMM: + # empty file - first page + self.isFirst = True + return + + self.isFirst = False + if IIMM == b"II\x2a\x00": + self.setEndian("<") + elif IIMM == b"MM\x00\x2a": + self.setEndian(">") + else: + raise RuntimeError("Invalid TIFF file header") + + self.skipIFDs() + self.goToEnd() + + def finalize(self): + if self.isFirst: + return + + # fix offsets + self.f.seek(self.offsetOfNewPage) + + IIMM = self.f.read(4) + if not IIMM: + # raise RuntimeError("nothing written into new page") + # Make it easy to finish a frame without committing to a new one. + return + + if IIMM != self.IIMM: + raise RuntimeError("IIMM of new page doesn't match IIMM of " + "first page") + + IFDoffset = self.readLong() + IFDoffset += self.offsetOfNewPage + self.f.seek(self.whereToWriteNewIFDOffset) + self.writeLong(IFDoffset) + self.f.seek(IFDoffset) + self.fixIFD() + + def newFrame(self): + # Call this to finish a frame. 
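AppendingTiffWriter above is the helper behind multi-page saves; from user code it is reached through save_all and append_images. A minimal sketch with hypothetical file names:

from PIL import Image

pages = [Image.new("L", (32, 32), shade) for shade in (0, 128, 255)]
pages[0].save("pages.tif", save_all=True, append_images=pages[1:])

print(Image.open("pages.tif").n_frames)     # expected: 3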
+ self.finalize() + self.setup() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.close_fp: + self.close() + return False + + def tell(self): + return self.f.tell() - self.offsetOfNewPage + + def seek(self, offset, whence): + if whence == os.SEEK_SET: + offset += self.offsetOfNewPage + + self.f.seek(offset, whence) + return self.tell() + + def goToEnd(self): + self.f.seek(0, os.SEEK_END) + pos = self.f.tell() + + # pad to 16 byte boundary + padBytes = 16 - pos % 16 + if 0 < padBytes < 16: + self.f.write(bytes(bytearray(padBytes))) + self.offsetOfNewPage = self.f.tell() + + def setEndian(self, endian): + self.endian = endian + self.longFmt = self.endian + "L" + self.shortFmt = self.endian + "H" + self.tagFormat = self.endian + "HHL" + + def skipIFDs(self): + while True: + IFDoffset = self.readLong() + if IFDoffset == 0: + self.whereToWriteNewIFDOffset = self.f.tell() - 4 + break + + self.f.seek(IFDoffset) + numTags = self.readShort() + self.f.seek(numTags * 12, os.SEEK_CUR) + + def write(self, data): + return self.f.write(data) + + def readShort(self): + value, = struct.unpack(self.shortFmt, self.f.read(2)) + return value + + def readLong(self): + value, = struct.unpack(self.longFmt, self.f.read(4)) + return value + + def rewriteLastShortToLong(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % + bytesWritten) + + def rewriteLastShort(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError("wrote only %u bytes but wanted 2" % + bytesWritten) + + def rewriteLastLong(self, value): + self.f.seek(-4, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % + bytesWritten) + + def writeShort(self, value): + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError("wrote only %u bytes but wanted 2" % + bytesWritten) + + def writeLong(self, value): + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % + bytesWritten) + + def close(self): + self.finalize() + self.f.close() + + def fixIFD(self): + numTags = self.readShort() + + for i in range(numTags): + tag, fieldType, count = struct.unpack(self.tagFormat, + self.f.read(8)) + + fieldSize = self.fieldSizes[fieldType] + totalSize = fieldSize * count + isLocal = (totalSize <= 4) + if not isLocal: + offset = self.readLong() + offset += self.offsetOfNewPage + self.rewriteLastLong(offset) + + if tag in self.Tags: + curPos = self.f.tell() + + if isLocal: + self.fixOffsets(count, isShort=(fieldSize == 2), + isLong=(fieldSize == 4)) + self.f.seek(curPos + 4) + else: + self.f.seek(offset) + self.fixOffsets(count, isShort=(fieldSize == 2), + isLong=(fieldSize == 4)) + self.f.seek(curPos) + + offset = curPos = None + + elif isLocal: + # skip the locally stored value that is not an offset + self.f.seek(4, os.SEEK_CUR) + + def fixOffsets(self, count, isShort=False, isLong=False): + if not isShort and not isLong: + raise RuntimeError("offset is neither short nor long") + + for i in 
range(count): + offset = self.readShort() if isShort else self.readLong() + offset += self.offsetOfNewPage + if isShort and offset >= 65536: + # offset is now too large - we must convert shorts to longs + if count != 1: + raise RuntimeError("not implemented") # XXX TODO + + # simple case - the offset is just one and therefore it is + # local (not referenced with another offset) + self.rewriteLastShortToLong(offset) + self.f.seek(-10, os.SEEK_CUR) + self.writeShort(4) # rewrite the type to LONG + self.f.seek(8, os.SEEK_CUR) + elif isShort: + self.rewriteLastShort(offset) + else: + self.rewriteLastLong(offset) + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + encoderconfig = im.encoderconfig + append_images = list(encoderinfo.get("append_images", [])) + if not hasattr(im, "n_frames") and not append_images: + return _save(im, fp, filename) + + cur_idx = im.tell() + try: + with AppendingTiffWriter(fp) as tf: + for ims in [im]+append_images: + ims.encoderinfo = encoderinfo + ims.encoderconfig = encoderconfig + if not hasattr(ims, "n_frames"): + nfr = 1 + else: + nfr = ims.n_frames + + for idx in range(nfr): + ims.seek(idx) + ims.load() + _save(ims, tf, filename) + tf.newFrame() + finally: + im.seek(cur_idx) + + +# +# -------------------------------------------------------------------- +# Register + +Image.register_open(TiffImageFile.format, TiffImageFile, _accept) +Image.register_save(TiffImageFile.format, _save) +Image.register_save_all(TiffImageFile.format, _save_all) + +Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"]) + +Image.register_mime(TiffImageFile.format, "image/tiff") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff767a6e77d3f38ad18901572dc416b8b2bcdb04 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.py new file mode 100644 index 0000000000000000000000000000000000000000..427f3a4894008c97ec42c53112817ebe60c2c9bb --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.py @@ -0,0 +1,448 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF tags +# +# This module provides clear-text names for various well-known +# TIFF tags. the TIFF codec works just fine without it. +# +# Copyright (c) Secret Labs AB 1999. +# +# See the README file for information on usage and redistribution. +# + +## +# This module provides constants and clear-text names for various +# well-known TIFF tags. +## + +from collections import namedtuple + + +class TagInfo(namedtuple("_TagInfo", "value name type length enum")): + __slots__ = [] + + def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None): + return super(TagInfo, cls).__new__( + cls, value, name, type, length, enum or {}) + + def cvt_enum(self, value): + return self.enum.get(value, value) + + +def lookup(tag): + """ + :param tag: Integer tag number + :returns: Taginfo namedtuple, From the TAGS_V2 info if possible, + otherwise just populating the value and name from TAGS. + If the tag is not recognized, "unknown" is returned for the name + + """ + + return TAGS_V2.get(tag, TagInfo(tag, TAGS.get(tag, 'unknown'))) + + +## +# Map tag numbers to tag info. 
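lookup() above resolves a numeric tag against the TAGS_V2 table defined below, falling back to the legacy TAGS names. A short sketch using two baseline tags (271 Make, 259 Compression):

from PIL import TiffTags

make = TiffTags.lookup(271)
print(make.value, make.name, make.type)       # 271 Make 2 (ASCII)
print(TiffTags.lookup(259).enum["LZW"])       # 5
print(TiffTags.lookup(60000).name)            # unknown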
+# +# id: (Name, Type, Length, enum_values) +# +# The length here differs from the length in the tiff spec. For +# numbers, the tiff spec is for the number of fields returned. We +# agree here. For string-like types, the tiff spec uses the length of +# field in bytes. In Pillow, we are using the number of expected +# fields, in general 1 for string-like types. + + +BYTE = 1 +ASCII = 2 +SHORT = 3 +LONG = 4 +RATIONAL = 5 +UNDEFINED = 7 +SIGNED_RATIONAL = 10 +DOUBLE = 12 + +TAGS_V2 = { + + 254: ("NewSubfileType", LONG, 1), + 255: ("SubfileType", SHORT, 1), + 256: ("ImageWidth", LONG, 1), + 257: ("ImageLength", LONG, 1), + 258: ("BitsPerSample", SHORT, 0), + 259: ("Compression", SHORT, 1, + {"Uncompressed": 1, "CCITT 1d": 2, "Group 3 Fax": 3, "Group 4 Fax": 4, + "LZW": 5, "JPEG": 6, "PackBits": 32773}), + + 262: ("PhotometricInterpretation", SHORT, 1, + {"WhiteIsZero": 0, "BlackIsZero": 1, "RGB": 2, "RGB Palette": 3, + "Transparency Mask": 4, "CMYK": 5, "YCbCr": 6, "CieLAB": 8, + "CFA": 32803, # TIFF/EP, Adobe DNG + "LinearRaw": 32892}), # Adobe DNG + 263: ("Threshholding", SHORT, 1), + 264: ("CellWidth", SHORT, 1), + 265: ("CellLength", SHORT, 1), + 266: ("FillOrder", SHORT, 1), + 269: ("DocumentName", ASCII, 1), + + 270: ("ImageDescription", ASCII, 1), + 271: ("Make", ASCII, 1), + 272: ("Model", ASCII, 1), + 273: ("StripOffsets", LONG, 0), + 274: ("Orientation", SHORT, 1), + 277: ("SamplesPerPixel", SHORT, 1), + 278: ("RowsPerStrip", LONG, 1), + 279: ("StripByteCounts", LONG, 0), + + 280: ("MinSampleValue", LONG, 0), + 281: ("MaxSampleValue", SHORT, 0), + 282: ("XResolution", RATIONAL, 1), + 283: ("YResolution", RATIONAL, 1), + 284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}), + 285: ("PageName", ASCII, 1), + 286: ("XPosition", RATIONAL, 1), + 287: ("YPosition", RATIONAL, 1), + 288: ("FreeOffsets", LONG, 1), + 289: ("FreeByteCounts", LONG, 1), + + 290: ("GrayResponseUnit", SHORT, 1), + 291: ("GrayResponseCurve", SHORT, 0), + 292: ("T4Options", LONG, 1), + 293: ("T6Options", LONG, 1), + 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}), + 297: ("PageNumber", SHORT, 2), + + 301: ("TransferFunction", SHORT, 0), + 305: ("Software", ASCII, 1), + 306: ("DateTime", ASCII, 1), + + 315: ("Artist", ASCII, 1), + 316: ("HostComputer", ASCII, 1), + 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}), + 318: ("WhitePoint", RATIONAL, 2), + 319: ("PrimaryChromaticities", SHORT, 6), + + 320: ("ColorMap", SHORT, 0), + 321: ("HalftoneHints", SHORT, 2), + 322: ("TileWidth", LONG, 1), + 323: ("TileLength", LONG, 1), + 324: ("TileOffsets", LONG, 0), + 325: ("TileByteCounts", LONG, 0), + + 332: ("InkSet", SHORT, 1), + 333: ("InkNames", ASCII, 1), + 334: ("NumberOfInks", SHORT, 1), + 336: ("DotRange", SHORT, 0), + 337: ("TargetPrinter", ASCII, 1), + 338: ("ExtraSamples", SHORT, 0), + 339: ("SampleFormat", SHORT, 0), + + 340: ("SMinSampleValue", DOUBLE, 0), + 341: ("SMaxSampleValue", DOUBLE, 0), + 342: ("TransferRange", SHORT, 6), + + 347: ("JPEGTables", UNDEFINED, 1), + + # obsolete JPEG tags + 512: ("JPEGProc", SHORT, 1), + 513: ("JPEGInterchangeFormat", LONG, 1), + 514: ("JPEGInterchangeFormatLength", LONG, 1), + 515: ("JPEGRestartInterval", SHORT, 1), + 517: ("JPEGLosslessPredictors", SHORT, 0), + 518: ("JPEGPointTransforms", SHORT, 0), + 519: ("JPEGQTables", LONG, 0), + 520: ("JPEGDCTables", LONG, 0), + 521: ("JPEGACTables", LONG, 0), + + 529: ("YCbCrCoefficients", RATIONAL, 3), + 530: ("YCbCrSubSampling", SHORT, 2), + 531: ("YCbCrPositioning", SHORT, 
1), + 532: ("ReferenceBlackWhite", LONG, 0), + + 700: ('XMP', BYTE, 1), + + 33432: ("Copyright", ASCII, 1), + 34377: ('PhotoshopInfo', BYTE, 1), + + # FIXME add more tags here + 34665: ("ExifIFD", SHORT, 1), + 34675: ('ICCProfile', UNDEFINED, 1), + 34853: ('GPSInfoIFD', BYTE, 1), + + # MPInfo + 45056: ("MPFVersion", UNDEFINED, 1), + 45057: ("NumberOfImages", LONG, 1), + 45058: ("MPEntry", UNDEFINED, 1), + 45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check + 45060: ("TotalFrames", LONG, 1), + 45313: ("MPIndividualNum", LONG, 1), + 45569: ("PanOrientation", LONG, 1), + 45570: ("PanOverlap_H", RATIONAL, 1), + 45571: ("PanOverlap_V", RATIONAL, 1), + 45572: ("BaseViewpointNum", LONG, 1), + 45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1), + 45574: ("BaselineLength", RATIONAL, 1), + 45575: ("VerticalDivergence", SIGNED_RATIONAL, 1), + 45576: ("AxisDistance_X", SIGNED_RATIONAL, 1), + 45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1), + 45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1), + 45579: ("YawAngle", SIGNED_RATIONAL, 1), + 45580: ("PitchAngle", SIGNED_RATIONAL, 1), + 45581: ("RollAngle", SIGNED_RATIONAL, 1), + + 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}), + 50780: ("BestQualityScale", RATIONAL, 1), + 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one + 50839: ("ImageJMetaData", UNDEFINED, 1) # see Issue #2006 +} + +# Legacy Tags structure +# these tags aren't included above, but were in the previous versions +TAGS = {347: 'JPEGTables', + 700: 'XMP', + + # Additional Exif Info + 32932: 'Wang Annotation', + 33434: 'ExposureTime', + 33437: 'FNumber', + 33445: 'MD FileTag', + 33446: 'MD ScalePixel', + 33447: 'MD ColorTable', + 33448: 'MD LabName', + 33449: 'MD SampleInfo', + 33450: 'MD PrepDate', + 33451: 'MD PrepTime', + 33452: 'MD FileUnits', + 33550: 'ModelPixelScaleTag', + 33723: 'IptcNaaInfo', + 33918: 'INGR Packet Data Tag', + 33919: 'INGR Flag Registers', + 33920: 'IrasB Transformation Matrix', + 33922: 'ModelTiepointTag', + 34264: 'ModelTransformationTag', + 34377: 'PhotoshopInfo', + 34735: 'GeoKeyDirectoryTag', + 34736: 'GeoDoubleParamsTag', + 34737: 'GeoAsciiParamsTag', + 34850: 'ExposureProgram', + 34852: 'SpectralSensitivity', + 34855: 'ISOSpeedRatings', + 34856: 'OECF', + 34864: 'SensitivityType', + 34865: 'StandardOutputSensitivity', + 34866: 'RecommendedExposureIndex', + 34867: 'ISOSpeed', + 34868: 'ISOSpeedLatitudeyyy', + 34869: 'ISOSpeedLatitudezzz', + 34908: 'HylaFAX FaxRecvParams', + 34909: 'HylaFAX FaxSubAddress', + 34910: 'HylaFAX FaxRecvTime', + 36864: 'ExifVersion', + 36867: 'DateTimeOriginal', + 36868: 'DateTImeDigitized', + 37121: 'ComponentsConfiguration', + 37122: 'CompressedBitsPerPixel', + 37724: 'ImageSourceData', + 37377: 'ShutterSpeedValue', + 37378: 'ApertureValue', + 37379: 'BrightnessValue', + 37380: 'ExposureBiasValue', + 37381: 'MaxApertureValue', + 37382: 'SubjectDistance', + 37383: 'MeteringMode', + 37384: 'LightSource', + 37385: 'Flash', + 37386: 'FocalLength', + 37396: 'SubjectArea', + 37500: 'MakerNote', + 37510: 'UserComment', + 37520: 'SubSec', + 37521: 'SubSecTimeOriginal', + 37522: 'SubsecTimeDigitized', + 40960: 'FlashPixVersion', + 40961: 'ColorSpace', + 40962: 'PixelXDimension', + 40963: 'PixelYDimension', + 40964: 'RelatedSoundFile', + 40965: 'InteroperabilityIFD', + 41483: 'FlashEnergy', + 41484: 'SpatialFrequencyResponse', + 41486: 'FocalPlaneXResolution', + 41487: 'FocalPlaneYResolution', + 41488: 'FocalPlaneResolutionUnit', + 41492: 'SubjectLocation', + 41493: 'ExposureIndex', + 41495: 'SensingMethod', + 
41728: 'FileSource', + 41729: 'SceneType', + 41730: 'CFAPattern', + 41985: 'CustomRendered', + 41986: 'ExposureMode', + 41987: 'WhiteBalance', + 41988: 'DigitalZoomRatio', + 41989: 'FocalLengthIn35mmFilm', + 41990: 'SceneCaptureType', + 41991: 'GainControl', + 41992: 'Contrast', + 41993: 'Saturation', + 41994: 'Sharpness', + 41995: 'DeviceSettingDescription', + 41996: 'SubjectDistanceRange', + 42016: 'ImageUniqueID', + 42032: 'CameraOwnerName', + 42033: 'BodySerialNumber', + 42034: 'LensSpecification', + 42035: 'LensMake', + 42036: 'LensModel', + 42037: 'LensSerialNumber', + 42112: 'GDAL_METADATA', + 42113: 'GDAL_NODATA', + 42240: 'Gamma', + 50215: 'Oce Scanjob Description', + 50216: 'Oce Application Selector', + 50217: 'Oce Identification Number', + 50218: 'Oce ImageLogic Characteristics', + + # Adobe DNG + 50706: 'DNGVersion', + 50707: 'DNGBackwardVersion', + 50708: 'UniqueCameraModel', + 50709: 'LocalizedCameraModel', + 50710: 'CFAPlaneColor', + 50711: 'CFALayout', + 50712: 'LinearizationTable', + 50713: 'BlackLevelRepeatDim', + 50714: 'BlackLevel', + 50715: 'BlackLevelDeltaH', + 50716: 'BlackLevelDeltaV', + 50717: 'WhiteLevel', + 50718: 'DefaultScale', + 50719: 'DefaultCropOrigin', + 50720: 'DefaultCropSize', + 50721: 'ColorMatrix1', + 50722: 'ColorMatrix2', + 50723: 'CameraCalibration1', + 50724: 'CameraCalibration2', + 50725: 'ReductionMatrix1', + 50726: 'ReductionMatrix2', + 50727: 'AnalogBalance', + 50728: 'AsShotNeutral', + 50729: 'AsShotWhiteXY', + 50730: 'BaselineExposure', + 50731: 'BaselineNoise', + 50732: 'BaselineSharpness', + 50733: 'BayerGreenSplit', + 50734: 'LinearResponseLimit', + 50735: 'CameraSerialNumber', + 50736: 'LensInfo', + 50737: 'ChromaBlurRadius', + 50738: 'AntiAliasStrength', + 50740: 'DNGPrivateData', + 50778: 'CalibrationIlluminant1', + 50779: 'CalibrationIlluminant2', + 50784: 'Alias Layer Metadata' + } + + +def _populate(): + for k, v in TAGS_V2.items(): + # Populate legacy structure. + TAGS[k] = v[0] + if len(v) == 4: + for sk, sv in v[3].items(): + TAGS[(k, sv)] = sk + + TAGS_V2[k] = TagInfo(k, *v) + + +_populate() +## +# Map type numbers to type names -- defined in ImageFileDirectory. + +TYPES = {} + +# was: +# TYPES = { +# 1: "byte", +# 2: "ascii", +# 3: "short", +# 4: "long", +# 5: "rational", +# 6: "signed byte", +# 7: "undefined", +# 8: "signed short", +# 9: "signed long", +# 10: "signed rational", +# 11: "float", +# 12: "double", +# } + +# +# These tags are handled by default in libtiff, without +# adding to the custom dictionary. From tif_dir.c, searching for +# case TIFFTAG in the _TIFFVSetField function: +# Line: item. 
+# 148: case TIFFTAG_SUBFILETYPE: +# 151: case TIFFTAG_IMAGEWIDTH: +# 154: case TIFFTAG_IMAGELENGTH: +# 157: case TIFFTAG_BITSPERSAMPLE: +# 181: case TIFFTAG_COMPRESSION: +# 202: case TIFFTAG_PHOTOMETRIC: +# 205: case TIFFTAG_THRESHHOLDING: +# 208: case TIFFTAG_FILLORDER: +# 214: case TIFFTAG_ORIENTATION: +# 221: case TIFFTAG_SAMPLESPERPIXEL: +# 228: case TIFFTAG_ROWSPERSTRIP: +# 238: case TIFFTAG_MINSAMPLEVALUE: +# 241: case TIFFTAG_MAXSAMPLEVALUE: +# 244: case TIFFTAG_SMINSAMPLEVALUE: +# 247: case TIFFTAG_SMAXSAMPLEVALUE: +# 250: case TIFFTAG_XRESOLUTION: +# 256: case TIFFTAG_YRESOLUTION: +# 262: case TIFFTAG_PLANARCONFIG: +# 268: case TIFFTAG_XPOSITION: +# 271: case TIFFTAG_YPOSITION: +# 274: case TIFFTAG_RESOLUTIONUNIT: +# 280: case TIFFTAG_PAGENUMBER: +# 284: case TIFFTAG_HALFTONEHINTS: +# 288: case TIFFTAG_COLORMAP: +# 294: case TIFFTAG_EXTRASAMPLES: +# 298: case TIFFTAG_MATTEING: +# 305: case TIFFTAG_TILEWIDTH: +# 316: case TIFFTAG_TILELENGTH: +# 327: case TIFFTAG_TILEDEPTH: +# 333: case TIFFTAG_DATATYPE: +# 344: case TIFFTAG_SAMPLEFORMAT: +# 361: case TIFFTAG_IMAGEDEPTH: +# 364: case TIFFTAG_SUBIFD: +# 376: case TIFFTAG_YCBCRPOSITIONING: +# 379: case TIFFTAG_YCBCRSUBSAMPLING: +# 383: case TIFFTAG_TRANSFERFUNCTION: +# 389: case TIFFTAG_REFERENCEBLACKWHITE: +# 393: case TIFFTAG_INKNAMES: + +# some of these are not in our TAGS_V2 dict and were included from tiff.h + +LIBTIFF_CORE = {255, 256, 257, 258, 259, 262, 263, 266, 274, 277, + 278, 280, 281, 340, 341, 282, 283, 284, 286, 287, + 296, 297, 321, 320, 338, 32995, 322, 323, 32998, + 32996, 339, 32997, 330, 531, 530, 301, 532, 333, + # as above + 269 # this has been in our tests forever, and works + } + +LIBTIFF_CORE.remove(320) # Array of short, crashes +LIBTIFF_CORE.remove(301) # Array of short, crashes +LIBTIFF_CORE.remove(532) # Array of long, crashes + +LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes +LIBTIFF_CORE.remove(322) # We don't have support for tiled images in libtiff +LIBTIFF_CORE.remove(323) # Tiled images +LIBTIFF_CORE.remove(333) # Ink Names either + +# Note to advanced users: There may be combinations of these +# parameters and values that when added properly, will work and +# produce valid tiff images that may work in your application. +# It is safe to add and remove tags from this set from Pillow's point +# of view so long as you test against libtiff. diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea522af5ca85711182f20d494f14d8fd15fb7446 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/TiffTags.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..6602cc86bbbb5152fa341281782769f8511957e4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.py @@ -0,0 +1,128 @@ +# encoding: utf-8 +# +# The Python Imaging Library. +# $Id$ +# +# WAL file handling +# +# History: +# 2003-04-23 fl created +# +# Copyright (c) 2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +# NOTE: This format cannot be automatically recognized, so the reader +# is not registered for use with Image.open(). 
To open a WAL file, use +# the WalImageFile.open() function instead. + +# This reader is based on the specification available from: +# https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml +# and has been tested with a few sample files found using google. + +from . import Image +from ._binary import i32le as i32 + +try: + import builtins +except ImportError: + import __builtin__ + builtins = __builtin__ + + +def open(filename): + """ + Load texture from a Quake2 WAL texture file. + + By default, a Quake2 standard palette is attached to the texture. + To override the palette, use the putpalette method. + + :param filename: WAL file name, or an opened file handle. + :returns: An image instance. + """ + # FIXME: modify to return a WalImageFile instance instead of + # plain Image object ? + + def imopen(fp): + # read header fields + header = fp.read(32+24+32+12) + size = i32(header, 32), i32(header, 36) + offset = i32(header, 40) + + # load pixel data + fp.seek(offset) + + Image._decompression_bomb_check(size) + im = Image.frombytes("P", size, fp.read(size[0] * size[1])) + im.putpalette(quake2palette) + + im.format = "WAL" + im.format_description = "Quake2 Texture" + + # strings are null-terminated + im.info["name"] = header[:32].split(b"\0", 1)[0] + next_name = header[56:56+32].split(b"\0", 1)[0] + if next_name: + im.info["next_name"] = next_name + + return im + + if hasattr(filename, "read"): + return imopen(filename) + else: + with builtins.open(filename, "rb") as fp: + return imopen(fp) + + +quake2palette = ( + # default palette taken from piffo 0.93 by Hans Häggström + b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e" + b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f" + b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c" + b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b" + b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10" + b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07" + b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f" + b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16" + b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d" + b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31" + b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28" + b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07" + b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27" + b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b" + b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01" + b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21" + b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14" + b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07" + b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14" + b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f" + b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34" + b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d" + b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14" + b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01" + b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24" + b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10" + b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01" + 
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27" + b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c" + b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a" + b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26" + b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d" + b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01" + b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20" + b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17" + b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07" + b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25" + b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c" + b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01" + b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23" + b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f" + b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b" + b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37" + b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b" + b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01" + b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10" + b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b" + b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20" +) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e21313601977ceec4212e21749539384cd37ba89 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WalImageFile.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..39a8f2e35dd50287f9fa449cb6c5b8fdee239704 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.py @@ -0,0 +1,316 @@ +from . import Image, ImageFile, _webp +from io import BytesIO + + +_VALID_WEBP_MODES = { + "RGBX": True, + "RGBA": True, + } + +_VALID_WEBP_LEGACY_MODES = { + "RGB": True, + "RGBA": True, + } + +_VP8_MODES_BY_IDENTIFIER = { + b"VP8 ": "RGB", + b"VP8X": "RGBA", + b"VP8L": "RGBA", # lossless + } + + +def _accept(prefix): + is_riff_file_format = prefix[:4] == b"RIFF" + is_webp_file = prefix[8:12] == b"WEBP" + is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER + + return is_riff_file_format and is_webp_file and is_valid_vp8_mode + + +class WebPImageFile(ImageFile.ImageFile): + + format = "WEBP" + format_description = "WebP image" + + def _open(self): + if not _webp.HAVE_WEBPANIM: + # Legacy mode + data, width, height, self.mode, icc_profile, exif = \ + _webp.WebPDecode(self.fp.read()) + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + self.size = width, height + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.mode)] + self._n_frames = 1 + return + + # Use the newer AnimDecoder API to parse the (possibly) animated file, + # and access muxed chunks like ICC/EXIF/XMP. 
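The _accept() check above looks only at the container bytes: a RIFF header, the WEBP form type, and one of the VP8 chunk identifiers. A stand-alone sketch of that test on two fabricated prefixes:

def looks_like_webp(prefix):
    return (prefix[:4] == b"RIFF" and
            prefix[8:12] == b"WEBP" and
            prefix[12:16] in (b"VP8 ", b"VP8X", b"VP8L"))

print(looks_like_webp(b"RIFF\x10\x00\x00\x00WEBPVP8 "))     # True
print(looks_like_webp(b"\x89PNG\r\n\x1a\n" + b"\x00" * 8))  # False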
+ self._decoder = _webp.WebPAnimDecoder(self.fp.read()) + + # Get info from decoder + width, height, loop_count, bgcolor, frame_count, mode = \ + self._decoder.get_info() + self.size = width, height + self.info["loop"] = loop_count + bg_a, bg_r, bg_g, bg_b = \ + (bgcolor >> 24) & 0xFF, \ + (bgcolor >> 16) & 0xFF, \ + (bgcolor >> 8) & 0xFF, \ + bgcolor & 0xFF + self.info["background"] = (bg_r, bg_g, bg_b, bg_a) + self._n_frames = frame_count + self.mode = mode + self.tile = [] + + # Attempt to read ICC / EXIF / XMP chunks from file + icc_profile = self._decoder.get_chunk("ICCP") + exif = self._decoder.get_chunk("EXIF") + xmp = self._decoder.get_chunk("XMP ") + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + if xmp: + self.info["xmp"] = xmp + + # Initialize seek state + self._reset(reset=False) + self.seek(0) + + def _getexif(self): + from .JpegImagePlugin import _getexif + return _getexif(self) + + @property + def n_frames(self): + return self._n_frames + + @property + def is_animated(self): + return self._n_frames > 1 + + def seek(self, frame): + if not _webp.HAVE_WEBPANIM: + return super(WebPImageFile, self).seek(frame) + + # Perform some simple checks first + if frame >= self._n_frames: + raise EOFError("attempted to seek beyond end of sequence") + if frame < 0: + raise EOFError("negative frame index is not valid") + + # Set logical frame to requested position + self.__logical_frame = frame + + def _reset(self, reset=True): + if reset: + self._decoder.reset() + self.__physical_frame = 0 + self.__loaded = -1 + self.__timestamp = 0 + + def _get_next(self): + # Get next frame + ret = self._decoder.get_next() + self.__physical_frame += 1 + + # Check if an error occurred + if ret is None: + self._reset() # Reset just to be safe + self.seek(0) + raise EOFError("failed to decode next frame in WebP file") + + # Compute duration + data, timestamp = ret + duration = timestamp - self.__timestamp + self.__timestamp = timestamp + + # libwebp gives frame end, adjust to start of frame + timestamp -= duration + return data, timestamp, duration + + def _seek(self, frame): + if self.__physical_frame == frame: + return # Nothing to do + if frame < self.__physical_frame: + self._reset() # Rewind to beginning + while self.__physical_frame < frame: + self._get_next() # Advance to the requested frame + + def load(self): + if _webp.HAVE_WEBPANIM: + if self.__loaded != self.__logical_frame: + self._seek(self.__logical_frame) + + # We need to load the image data for this frame + data, timestamp, duration = self._get_next() + self.info["timestamp"] = timestamp + self.info["duration"] = duration + self.__loaded = self.__logical_frame + + # Set tile + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.mode)] + + return super(WebPImageFile, self).load() + + def tell(self): + if not _webp.HAVE_WEBPANIM: + return super(WebPImageFile, self).tell() + + return self.__logical_frame + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + append_images = list(encoderinfo.get("append_images", [])) + + # If total frame count is 1, then save using the legacy API, which + # will preserve non-alpha modes + total = 0 + for ims in [im]+append_images: + total += 1 if not hasattr(ims, "n_frames") else ims.n_frames + if total == 1: + _save(im, fp, filename) + return + + background = encoderinfo.get("background", (0, 0, 0, 0)) + duration = im.encoderinfo.get("duration", 0) + loop = im.encoderinfo.get("loop", 0) + minimize_size = 
im.encoderinfo.get("minimize_size", False) + kmin = im.encoderinfo.get("kmin", None) + kmax = im.encoderinfo.get("kmax", None) + allow_mixed = im.encoderinfo.get("allow_mixed", False) + verbose = False + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + method = im.encoderinfo.get("method", 0) + icc_profile = im.encoderinfo.get("icc_profile", "") + exif = im.encoderinfo.get("exif", "") + xmp = im.encoderinfo.get("xmp", "") + if allow_mixed: + lossless = False + + # Sensible keyframe defaults are from gif2webp.c script + if kmin is None: + kmin = 9 if lossless else 3 + if kmax is None: + kmax = 17 if lossless else 5 + + # Validate background color + if (not isinstance(background, (list, tuple)) or len(background) != 4 or + not all(v >= 0 and v < 256 for v in background)): + raise IOError("Background color is not an RGBA tuple clamped " + "to (0-255): %s" % str(background)) + + # Convert to packed uint + bg_r, bg_g, bg_b, bg_a = background + background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0) + + # Setup the WebP animation encoder + enc = _webp.WebPAnimEncoder( + im.size[0], im.size[1], + background, + loop, + minimize_size, + kmin, kmax, + allow_mixed, + verbose + ) + + # Add each frame + frame_idx = 0 + timestamp = 0 + cur_idx = im.tell() + try: + for ims in [im]+append_images: + # Get # of frames in this image + if not hasattr(ims, "n_frames"): + nfr = 1 + else: + nfr = ims.n_frames + + for idx in range(nfr): + ims.seek(idx) + ims.load() + + # Make sure image mode is supported + frame = ims + if ims.mode not in _VALID_WEBP_MODES: + alpha = ims.mode == 'P' and 'A' in ims.im.getpalettemode() + frame = ims.convert('RGBA' if alpha else 'RGBX') + + # Append the frame to the animation encoder + enc.add( + frame.tobytes(), + timestamp, + frame.size[0], frame.size[1], + frame.mode, + lossless, + quality, + method + ) + + # Update timestamp and frame index + if isinstance(duration, (list, tuple)): + timestamp += duration[frame_idx] + else: + timestamp += duration + frame_idx += 1 + + finally: + im.seek(cur_idx) + + # Force encoder to flush frames + enc.add( + None, + timestamp, + 0, 0, "", lossless, quality, 0 + ) + + # Get the final output from the encoder + data = enc.assemble(icc_profile, exif, xmp) + if data is None: + raise IOError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +def _save(im, fp, filename): + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + icc_profile = im.encoderinfo.get("icc_profile", "") + exif = im.encoderinfo.get("exif", "") + xmp = im.encoderinfo.get("xmp", "") + + if im.mode not in _VALID_WEBP_LEGACY_MODES: + alpha = im.mode == 'P' and 'A' in im.im.getpalettemode() + im = im.convert('RGBA' if alpha else 'RGB') + + data = _webp.WebPEncode( + im.tobytes(), + im.size[0], + im.size[1], + lossless, + float(quality), + im.mode, + icc_profile, + exif, + xmp + ) + if data is None: + raise IOError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +Image.register_open(WebPImageFile.format, WebPImageFile, _accept) +Image.register_save(WebPImageFile.format, _save) +if _webp.HAVE_WEBPANIM: + Image.register_save_all(WebPImageFile.format, _save_all) +Image.register_extension(WebPImageFile.format, ".webp") +Image.register_mime(WebPImageFile.format, "image/webp") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.pyc 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44be05d4a2486adef234838a531d3c731c23a63e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WebPImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..213584497a59af17a711ce897aa7be74079f256c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.py @@ -0,0 +1,170 @@ +# +# The Python Imaging Library +# $Id$ +# +# WMF stub codec +# +# history: +# 1996-12-14 fl Created +# 2004-02-22 fl Turned into a stub driver +# 2004-02-23 fl Added EMF support +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +# WMF/EMF reference documentation: +# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf +# http://wvware.sourceforge.net/caolan/index.html +# http://wvware.sourceforge.net/caolan/ora-wmf.html + +from __future__ import print_function + +from . import Image, ImageFile +from ._binary import i16le as word, si16le as short, i32le as dword, si32le as _long +from ._util import py3 + + +__version__ = "0.2" + +_handler = None + +if py3: + long = int + + +def register_handler(handler): + """ + Install application-specific WMF image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + + +if hasattr(Image.core, "drawwmf"): + # install default handler (windows only) + + class WmfHandler(object): + + def open(self, im): + im.mode = "RGB" + self.bbox = im.info["wmf_bbox"] + + def load(self, im): + im.fp.seek(0) # rewind + return Image.frombytes( + "RGB", im.size, + Image.core.drawwmf(im.fp.read(), im.size, self.bbox), + "raw", "BGR", (im.size[0]*3 + 3) & -4, -1 + ) + + register_handler(WmfHandler()) + +# +# -------------------------------------------------------------------- +# Read WMF file + + +def _accept(prefix): + return ( + prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or + prefix[:4] == b"\x01\x00\x00\x00" + ) + + +## +# Image plugin for Windows metafiles. 
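register_handler() above lets an application supply its own WMF/EMF rasterizer when Image.core.drawwmf is unavailable. A hedged sketch of the handler interface; StubWmfHandler is hypothetical and returns a blank image instead of really rendering:

from PIL import Image, WmfImagePlugin

class StubWmfHandler(object):
    def open(self, im):
        im.mode = "RGB"                              # bbox/size already parsed

    def load(self, im):
        return Image.new("RGB", im.size, "white")    # placeholder rendering

WmfImagePlugin.register_handler(StubWmfHandler())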
+ +class WmfStubImageFile(ImageFile.StubImageFile): + + format = "WMF" + format_description = "Windows Metafile" + + def _open(self): + + # check placable header + s = self.fp.read(80) + + if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00": + + # placeable windows metafile + + # get units per inch + inch = word(s, 14) + + # get bounding box + x0 = short(s, 6) + y0 = short(s, 8) + x1 = short(s, 10) + y1 = short(s, 12) + + # normalize size to 72 dots per inch + size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + self.info["dpi"] = 72 + + # print(self.mode, self.size, self.info) + + # sanity check (standard metafile header) + if s[22:26] != b"\x01\x00\t\x00": + raise SyntaxError("Unsupported WMF file format") + + elif dword(s) == 1 and s[40:44] == b" EMF": + # enhanced metafile + + # get bounding box + x0 = _long(s, 8) + y0 = _long(s, 12) + x1 = _long(s, 16) + y1 = _long(s, 20) + + # get frame (in 0.01 millimeter units) + frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) + + # normalize size to 72 dots per inch + size = x1 - x0, y1 - y0 + + # calculate dots per inch from bbox and frame + xdpi = 2540 * (x1 - y0) // (frame[2] - frame[0]) + ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1]) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + if xdpi == ydpi: + self.info["dpi"] = xdpi + else: + self.info["dpi"] = xdpi, ydpi + + else: + raise SyntaxError("Unsupported file format") + + self.mode = "RGB" + self.size = size + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr(_handler, "save"): + raise IOError("WMF save handler not installed") + _handler.save(im, fp, filename) + +# +# -------------------------------------------------------------------- +# Registry stuff + + +Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) +Image.register_save(WmfStubImageFile.format, _save) + +Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8904b0414bc835d658e59dd77a203261aa0ca46 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/WmfImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..a7d39ed89d0cf06122a782bbf813c5f09e83f326 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.py @@ -0,0 +1,80 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XV Thumbnail file handler by Charles E. "Gene" Cash +# (gcash@magicnet.net) +# +# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV, +# available from ftp://ftp.cis.upenn.edu/pub/xv/ +# +# history: +# 98-08-15 cec created (b/w only) +# 98-12-09 cec added color palette +# 98-12-28 fl added to PIL (with only a few very minor modifications) +# +# To do: +# FIXME: make save work (this requires quantization support) +# + +from . 
import Image, ImageFile, ImagePalette +from ._binary import i8, o8 + +__version__ = "0.1" + +_MAGIC = b"P7 332" + +# standard color palette for thumbnails (RGB332) +PALETTE = b"" +for r in range(8): + for g in range(8): + for b in range(4): + PALETTE = PALETTE + (o8((r*255)//7)+o8((g*255)//7)+o8((b*255)//3)) + + +def _accept(prefix): + return prefix[:6] == _MAGIC + + +## +# Image plugin for XV thumbnail images. + +class XVThumbImageFile(ImageFile.ImageFile): + + format = "XVThumb" + format_description = "XV thumbnail image" + + def _open(self): + + # check magic + if not _accept(self.fp.read(6)): + raise SyntaxError("not an XV thumbnail file") + + # Skip to beginning of next line + self.fp.readline() + + # skip info comments + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("Unexpected EOF reading XV thumbnail file") + if i8(s[0]) != 35: # ie. when not a comment: '#' + break + + # parse header line (already read) + s = s.strip().split() + + self.mode = "P" + self.size = int(s[0]), int(s[1]) + + self.palette = ImagePalette.raw("RGB", PALETTE) + + self.tile = [ + ("raw", (0, 0)+self.size, + self.fp.tell(), (self.mode, 0, 1) + )] + + +# -------------------------------------------------------------------- + +Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c414b93b4ed0ddd3a6cd7d916b5c9e99803806d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XVThumbImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..b43fbef500668aca855cd5cad97ada1c768c9367 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.py @@ -0,0 +1,96 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XBM File handling +# +# History: +# 1995-09-08 fl Created +# 1996-11-01 fl Added save support +# 1997-07-07 fl Made header parser more tolerant +# 1997-07-22 fl Fixed yet another parser bug +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) +# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog) +# 2004-02-24 fl Allow some whitespace before first #define +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import re +from . import Image, ImageFile + +__version__ = "0.6" + +# XBM header +xbm_head = re.compile( + br"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+" + b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+" + b"(?P<hotspot>" + b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+" + b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+" + b")?" + b"[\\000-\\377]*_bits\\[\\]" +) + + +def _accept(prefix): + return prefix.lstrip()[:7] == b"#define" + + +## +# Image plugin for X11 bitmaps.
+ +class XbmImageFile(ImageFile.ImageFile): + + format = "XBM" + format_description = "X11 Bitmap" + + def _open(self): + + m = xbm_head.match(self.fp.read(512)) + + if m: + + xsize = int(m.group("width")) + ysize = int(m.group("height")) + + if m.group("hotspot"): + self.info["hotspot"] = ( + int(m.group("xhot")), int(m.group("yhot")) + ) + + self.mode = "1" + self.size = xsize, ysize + + self.tile = [("xbm", (0, 0)+self.size, m.end(), None)] + + +def _save(im, fp, filename): + + if im.mode != "1": + raise IOError("cannot write mode %s as XBM" % im.mode) + + fp.write(("#define im_width %d\n" % im.size[0]).encode('ascii')) + fp.write(("#define im_height %d\n" % im.size[1]).encode('ascii')) + + hotspot = im.encoderinfo.get("hotspot") + if hotspot: + fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii')) + fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii')) + + fp.write(b"static char im_bits[] = {\n") + + ImageFile._save(im, fp, [("xbm", (0, 0)+im.size, 0, None)]) + + fp.write(b"};\n") + + +Image.register_open(XbmImageFile.format, XbmImageFile, _accept) +Image.register_save(XbmImageFile.format, _save) + +Image.register_extension(XbmImageFile.format, ".xbm") + +Image.register_mime(XbmImageFile.format, "image/xbm") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab326a818531f6da3c1f679b47beb0d9beede99e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XbmImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..a5cca0e2793a25dc00f745d2dbd3ffa935b1e5f7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.py @@ -0,0 +1,129 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XPM File handling +# +# History: +# 1996-12-29 fl Created +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re +from . import Image, ImageFile, ImagePalette +from ._binary import i8, o8 + +__version__ = "0.2" + +# XPM header +xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)") + + +def _accept(prefix): + return prefix[:9] == b"/* XPM */" + + +## +# Image plugin for X11 pixel maps. 
+ +class XpmImageFile(ImageFile.ImageFile): + + format = "XPM" + format_description = "X11 Pixel Map" + + def _open(self): + + if not _accept(self.fp.read(9)): + raise SyntaxError("not an XPM file") + + # skip forward to next string + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("broken XPM file") + m = xpm_head.match(s) + if m: + break + + self.size = int(m.group(1)), int(m.group(2)) + + pal = int(m.group(3)) + bpp = int(m.group(4)) + + if pal > 256 or bpp != 1: + raise ValueError("cannot read this XPM file") + + # + # load palette description + + palette = [b"\0\0\0"] * 256 + + for i in range(pal): + + s = self.fp.readline() + if s[-2:] == b'\r\n': + s = s[:-2] + elif s[-1:] in b'\r\n': + s = s[:-1] + + c = i8(s[1]) + s = s[2:-2].split() + + for i in range(0, len(s), 2): + + if s[i] == b"c": + + # process colour key + rgb = s[i+1] + if rgb == b"None": + self.info["transparency"] = c + elif rgb[0:1] == b"#": + # FIXME: handle colour names (see ImagePalette.py) + rgb = int(rgb[1:], 16) + palette[c] = (o8((rgb >> 16) & 255) + + o8((rgb >> 8) & 255) + + o8(rgb & 255)) + else: + # unknown colour + raise ValueError("cannot read this XPM file") + break + + else: + + # missing colour key + raise ValueError("cannot read this XPM file") + + self.mode = "P" + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))] + + def load_read(self, bytes): + + # + # load all image data in one chunk + + xsize, ysize = self.size + + s = [None] * ysize + + for i in range(ysize): + s[i] = self.fp.readline()[1:xsize+1].ljust(xsize) + + return b"".join(s) + +# +# Registry + + +Image.register_open(XpmImageFile.format, XpmImageFile, _accept) + +Image.register_extension(XpmImageFile.format, ".xpm") + +Image.register_mime(XpmImageFile.format, "image/xpm") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d511f6fd0754fcd190983370e4cfabd1fc6f43 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/XpmImagePlugin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a07280e3142ce962ca720858c184a6915b3eceb4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.py @@ -0,0 +1,74 @@ +"""Pillow {} (Fork of the Python Imaging Library) + +Pillow is the friendly PIL fork by Alex Clark and Contributors. + https://github.com/python-pillow/Pillow/ + +Pillow is forked from PIL 1.1.7. + +PIL is the Python Imaging Library by Fredrik Lundh and Contributors. +Copyright (c) 1999 by Secret Labs AB. + +Use PIL.__version__ for this Pillow version. +PIL.VERSION is the old PIL version and will be removed in the future. + +;-) +""" + +from . import _version + +# VERSION is deprecated and will be removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed after that. +# Use __version__ instead. 
+VERSION = '1.1.7' # PIL Version +PILLOW_VERSION = __version__ = _version.__version__ + +del _version + +__doc__ = __doc__.format(__version__) # include version in docstring + + +_plugins = ['BlpImagePlugin', + 'BmpImagePlugin', + 'BufrStubImagePlugin', + 'CurImagePlugin', + 'DcxImagePlugin', + 'DdsImagePlugin', + 'EpsImagePlugin', + 'FitsStubImagePlugin', + 'FliImagePlugin', + 'FpxImagePlugin', + 'FtexImagePlugin', + 'GbrImagePlugin', + 'GifImagePlugin', + 'GribStubImagePlugin', + 'Hdf5StubImagePlugin', + 'IcnsImagePlugin', + 'IcoImagePlugin', + 'ImImagePlugin', + 'ImtImagePlugin', + 'IptcImagePlugin', + 'JpegImagePlugin', + 'Jpeg2KImagePlugin', + 'McIdasImagePlugin', + 'MicImagePlugin', + 'MpegImagePlugin', + 'MpoImagePlugin', + 'MspImagePlugin', + 'PalmImagePlugin', + 'PcdImagePlugin', + 'PcxImagePlugin', + 'PdfImagePlugin', + 'PixarImagePlugin', + 'PngImagePlugin', + 'PpmImagePlugin', + 'PsdImagePlugin', + 'SgiImagePlugin', + 'SpiderImagePlugin', + 'SunImagePlugin', + 'TgaImagePlugin', + 'TiffImagePlugin', + 'WebPImagePlugin', + 'WmfImagePlugin', + 'XbmImagePlugin', + 'XpmImagePlugin', + 'XVThumbImagePlugin'] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c13d38750d800231ee044fcbb2b23a1dccd26a92 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..767c13b9d46e16e2e7b90485960e98ed3c6881d0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.py @@ -0,0 +1,94 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Binary input/output support routines. +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1995-2003 by Fredrik Lundh +# Copyright (c) 2012 by Brian Crowell +# +# See the README file for information on usage and redistribution. +# + +from struct import unpack_from, pack +from ._util import py3 + +if py3: + def i8(c): + return c if c.__class__ is int else c[0] + + def o8(i): + return bytes((i & 255,)) +else: + def i8(c): + return ord(c) + + def o8(i): + return chr(i & 255) + + +# Input, le = little endian, be = big endian +def i16le(c, o=0): + """ + Converts a 2-bytes (16 bits) string to an unsigned integer. 
+ + c: string containing bytes to convert + o: offset of bytes to convert in string + """ + return unpack_from("<H", c, o)[0] + + +def si16le(c, o=0): + """ + Converts a 2-bytes (16 bits) string to a signed integer. + + c: string containing bytes to convert + o: offset of bytes to convert in string + """ + return unpack_from("<h", c, o)[0] + + +def i32le(c, o=0): + """ + Converts a 4-bytes (32 bits) string to an unsigned integer. + + c: string containing bytes to convert + o: offset of bytes to convert in string + """ + return unpack_from("<I", c, o)[0] + + +def si32le(c, o=0): + """ + Converts a 4-bytes (32 bits) string to a signed integer. + + c: string containing bytes to convert + o: offset of bytes to convert in string + """ + return unpack_from("<i", c, o)[0] + + +def i16be(c, o=0): + return unpack_from(">H", c, o)[0] + + +def i32be(c, o=0): + return unpack_from(">I", c, o)[0] + + +# Output, le = little endian, be = big endian +def o16le(i): + return pack("<H", i) + + +def o32le(i): + return pack("<I", i) + + +def o16be(i): + return pack(">H", i) + + +def o32be(i): + return pack(">I", i) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b434ee06b157cfba633e0c0f3a04ed14159a30e8 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_binary.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imaging.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imaging.so new file mode 100755 index 0000000000000000000000000000000000000000..16d87480ab5e411e9042ca39cc001066e7b71be4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imaging.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingcms.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingcms.so new file mode 100755 index 0000000000000000000000000000000000000000..df26fe93554ffff473f6d3d85d4f7ccc1a33809d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingcms.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingft.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingft.so new file mode 100755 index 0000000000000000000000000000000000000000..70791eac2516b93e21b8aef4cd2480a52387ae9e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingft.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmath.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmath.so new file mode 100755 index 0000000000000000000000000000000000000000..212553eb191cc9a90ce2369697f1fe817214bdef Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmath.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmorph.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmorph.so new file mode 100755 index 0000000000000000000000000000000000000000..79052a58f9922c47c038a1dbdf124bc65a703107 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingmorph.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingtk.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingtk.so new file mode 100755 index 0000000000000000000000000000000000000000..2db3386578a54dd06e1558df445ed490e8010387 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_imagingtk.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.py new file mode 100644 index 0000000000000000000000000000000000000000..987d9621182bfa72f1f5e8ae68353caf67842370 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.py @@ -0,0 +1,20 @@ +""" Find compiled module linking to Tcl / Tk libraries +""" +import sys + +if sys.version_info.major > 2: + from tkinter import _tkinter as tk +else: + from Tkinter import tkinter as tk + +if 
hasattr(sys, 'pypy_find_executable'): + # Tested with packages at https://bitbucket.org/pypy/pypy/downloads. + # PyPies 1.6, 2.0 do not have tkinter built in. PyPy3-2.3.1 gives an + # OSError trying to import tkinter. Otherwise: + try: # PyPy 5.1, 4.0.0, 2.6.1, 2.6.0 + TKINTER_LIB = tk.tklib_cffi.__file__ + except AttributeError: + # PyPy3 2.4, 2.1-beta1; PyPy 2.5.1, 2.5.0, 2.4.0, 2.3, 2.2, 2.1 + TKINTER_LIB = tk.tkffi.verifier.modulefilename +else: + TKINTER_LIB = tk.__file__ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c20d9ba920f8f19b674e2b794791c1cb1a56e4c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_tkinter_finder.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6618c625fe39720d2d102329fed2b1a6068bb8e1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.py @@ -0,0 +1,29 @@ +import os, sys + +py3 = sys.version_info.major >= 3 + +if py3: + def isStringType(t): + return isinstance(t, str) + + def isPath(f): + return isinstance(f, (bytes, str)) +else: + def isStringType(t): + return isinstance(t, basestring) + + def isPath(f): + return isinstance(f, basestring) + + +# Checks if an object is a string, and that it points to a directory. +def isDirectory(f): + return isPath(f) and os.path.isdir(f) + + +class deferred_error(object): + def __init__(self, ex): + self.ex = ex + + def __getattr__(self, elt): + raise self.ex diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b462a80af6ad48f57063d4394b2d3e3e0bdb10e6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_util.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..b42628ddeeb2a6b8886d0b616e749ee74806f263 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.py @@ -0,0 +1,2 @@ +# Master version for Pillow +__version__ = '5.2.0' diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2c809b3c855e7fc4dc8faa17d7b9a75b4ac8fe5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_version.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_webp.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_webp.so new file mode 100755 index 0000000000000000000000000000000000000000..0b8fcc502bc7d14a476e0573ed30bbc1e89f5d27 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/_webp.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.py new file mode 100644 index 
0000000000000000000000000000000000000000..9926445ec4ff3ea0845bb2dd17eedbf716fbf870 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.py @@ -0,0 +1,85 @@ +from . import Image + +modules = { + "pil": "PIL._imaging", + "tkinter": "PIL._tkinter_finder", + "freetype2": "PIL._imagingft", + "littlecms2": "PIL._imagingcms", + "webp": "PIL._webp", +} + + +def check_module(feature): + if not (feature in modules): + raise ValueError("Unknown module %s" % feature) + + module = modules[feature] + + try: + __import__(module) + return True + except ImportError: + return False + + +def get_supported_modules(): + return [f for f in modules if check_module(f)] + + +codecs = { + "jpg": "jpeg", + "jpg_2000": "jpeg2k", + "zlib": "zip", + "libtiff": "libtiff" +} + + +def check_codec(feature): + if feature not in codecs: + raise ValueError("Unknown codec %s" % feature) + + codec = codecs[feature] + + return codec + "_encoder" in dir(Image.core) + + +def get_supported_codecs(): + return [f for f in codecs if check_codec(f)] + + +features = { + "webp_anim": ("PIL._webp", 'HAVE_WEBPANIM'), + "webp_mux": ("PIL._webp", 'HAVE_WEBPMUX'), + "transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY"), + "raqm": ("PIL._imagingft", "HAVE_RAQM") +} + + +def check_feature(feature): + if feature not in features: + raise ValueError("Unknown feature %s" % feature) + + module, flag = features[feature] + + try: + imported_module = __import__(module, fromlist=['PIL']) + return getattr(imported_module, flag) + except ImportError: + return None + + +def get_supported_features(): + return [f for f in features if check_feature(f)] + + +def check(feature): + return (feature in modules and check_module(feature) or + feature in codecs and check_codec(feature) or + feature in features and check_feature(feature)) + + +def get_supported(): + ret = get_supported_modules() + ret.extend(get_supported_features()) + ret.extend(get_supported_codecs()) + return ret diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af50f30f7f32831b66b0b21fb5bae9b0ed45d636 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/PIL/features.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/LICENSE.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..80456a7538644fe77e7aa692866903242e4810ba --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/LICENSE.txt @@ -0,0 +1,16 @@ +The Python Imaging Library (PIL) is + + Copyright © 1997-2011 by Secret Labs AB + Copyright © 1995-2011 by Fredrik Lundh + +Pillow is the friendly PIL fork. 
It is + + Copyright © 2010-2018 by Alex Clark and contributors + +Like PIL, Pillow is licensed under the open source PIL Software License: + +By obtaining, using, and/or copying this software and/or its associated documentation, you agree that you have read, understood, and will comply with the following terms and conditions: + +Permission to use, copy, modify, and distribute this software and its associated documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies, and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Secret Labs AB or the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. + +SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1895a9fcb7ad1c3885e4db2e3dd73d0d3d64d243 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/METADATA @@ -0,0 +1,107 @@ +Metadata-Version: 2.1 +Name: Pillow +Version: 5.2.0 +Summary: Python Imaging Library (Fork) +Home-page: http://python-pillow.org +Author: Alex Clark (Fork Author) +Author-email: aclark@aclark.net +License: Standard PIL License +Keywords: Imaging +Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: Topic :: Multimedia :: Graphics +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Digital Camera +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Screen Capture +Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion +Classifier: Topic :: Multimedia :: Graphics :: Viewers +Classifier: License :: Other/Proprietary License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +Pillow +====== + +Python Imaging Library (Fork) +----------------------------- + +Pillow is the friendly PIL fork by `Alex Clark and Contributors `_. PIL is the Python Imaging Library by Fredrik Lundh and Contributors. + +.. start-badges + +.. list-table:: + :stub-columns: 1 + + * - docs + - |docs| + * - tests + - |linux| |macos| |windows| |coverage| + * - package + - |zenodo| |version| + * - social + - |gitter| |twitter| + +.. 
|docs| image:: https://readthedocs.org/projects/pillow/badge/?version=latest + :target: https://pillow.readthedocs.io/?badge=latest + :alt: Documentation Status + +.. |linux| image:: https://img.shields.io/travis/python-pillow/Pillow/master.svg?label=Linux%20build + :target: https://travis-ci.org/python-pillow/Pillow + :alt: Travis CI build status (Linux) + +.. |macos| image:: https://img.shields.io/travis/python-pillow/pillow-wheels/latest.svg?label=macOS%20build + :target: https://travis-ci.org/python-pillow/pillow-wheels + :alt: Travis CI build status (macOS) + +.. |windows| image:: https://img.shields.io/appveyor/ci/python-pillow/Pillow/master.svg?label=Windows%20build + :target: https://ci.appveyor.com/project/python-pillow/Pillow + :alt: AppVeyor CI build status (Windows) + +.. |coverage| image:: https://coveralls.io/repos/python-pillow/Pillow/badge.svg?branch=master&service=github + :target: https://coveralls.io/github/python-pillow/Pillow?branch=master + :alt: Code coverage + +.. |zenodo| image:: https://zenodo.org/badge/17549/python-pillow/Pillow.svg + :target: https://zenodo.org/badge/latestdoi/17549/python-pillow/Pillow + +.. |version| image:: https://img.shields.io/pypi/v/pillow.svg + :target: https://pypi.org/project/Pillow/ + :alt: Latest PyPI version + +.. |gitter| image:: https://badges.gitter.im/python-pillow/Pillow.svg + :target: https://gitter.im/python-pillow/Pillow?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + :alt: Join the chat at https://gitter.im/python-pillow/Pillow + +.. |twitter| image:: https://img.shields.io/badge/tweet-on%20Twitter-00aced.svg + :target: https://twitter.com/PythonPillow + :alt: Follow on https://twitter.com/PythonPillow + +.. end-badges + + + +More Information +---------------- + +- `Documentation `_ + + - `Installation `_ + - `Handbook `_ + +- `Contribute `_ + + - `Issues `_ + - `Pull requests `_ + +- `Changelog `_ + + - `Pre-fork `_ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..450cf9059d5ff2aaefaaa8a604e3cf7a841c5c86 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/RECORD @@ -0,0 +1,209 @@ +PIL/.libs/libfreetype-6ed94974.so.6.16.1,sha256=7HFYgY53-x4NVIP14eHvGwPGLZmKs327kxs_1m6PvVU,1254200 +PIL/.libs/libjpeg-3fe7dfc0.so.9.3.0,sha256=4kAV2SMqD-qa-LK26xsUdUGe-hXdNFhUHA4hdWI6Cno,344704 +PIL/.libs/liblcms2-a6801db4.so.2.0.8,sha256=0yDBrtoWH6dLJfk2KCgE41hpqrFbc43rDLZXFfhpUtU,453104 +PIL/.libs/liblzma-90de1f11.so.5.2.2,sha256=osdreMNRCe59cUbV9pPueaURTBIJV76m044DdBtzOOU,216664 +PIL/.libs/libopenjp2-e366d6b0.so.2.1.0,sha256=GpeLTsAMZNDD1IaIBxtt43XQKFVpMFaEBaXD6aGlOzg,310256 +PIL/.libs/libpng16-8793a1b2.so.16.32.0,sha256=LlPL_INpz4apwAtBHAanWVI3TBtXHnyF2Rf4v9-rVLc,279824 +PIL/.libs/libtiff-8a6d997d.so.5.3.0,sha256=SMU1y4pKsu0c8VjLD_e5lvf-Uh0czIo75AFtjJ8nn-0,636464 +PIL/.libs/libwebp-8ccd29fd.so.7.0.2,sha256=XjUXjc-WovRk8DUqvYt3_iXD_JoUDGV3NFsveY75MAo,776912 +PIL/.libs/libwebpdemux-eba3dc32.so.2.0.4,sha256=xO0QPVfm6m-9i4MpFv5-YSpyR-fjzXWeQpZSxbK-dDc,29512 +PIL/.libs/libwebpmux-1c63fe99.so.3.0.2,sha256=Ld9g5ZSZYMm4cfF17XTBsqSuW-JBFXrSXmCwuXfNW8c,58600 +PIL/.libs/libz-a147dcb0.so.1.2.3,sha256=1IGoOjRpujOMRn7cZ29ERtAxBt6SxTUlRLBkSqa_lsk,87848 +PIL/BdfFontFile.py,sha256=dx--JzHUxLiUf0eyW2znQP4HRe941iZfDInK_l6A4J0,3367 +PIL/BdfFontFile.pyc,, 
+PIL/BlpImagePlugin.py,sha256=40hyRVgy4bTykbO8n_P-T_e-ZosJeuGWPsz8TKwTo5A,14415 +PIL/BlpImagePlugin.pyc,, +PIL/BmpImagePlugin.py,sha256=zQMJyXF1ugH9OqG1PaNgai1fCHsn2xLIpX1WVlQ5ZwQ,13137 +PIL/BmpImagePlugin.pyc,, +PIL/BufrStubImagePlugin.py,sha256=vlns5HsnrKuFom-0NR_WGLqv5k1Q07CnVCnup3eJkRE,1518 +PIL/BufrStubImagePlugin.pyc,, +PIL/ContainerIO.py,sha256=FZUZlQJh6ItrIC0W48B8pOwgqywZNH4YXPGQXqLbifE,2693 +PIL/ContainerIO.pyc,, +PIL/CurImagePlugin.py,sha256=fet0bN2V623xAbLT0YMzx8XeFCNbGnsGhI-7SB88t7g,2071 +PIL/CurImagePlugin.pyc,, +PIL/DcxImagePlugin.py,sha256=H-pUUjreLV8pcigxXi76XSaGCsfjugZVRJpjt5yV9dE,2025 +PIL/DcxImagePlugin.pyc,, +PIL/DdsImagePlugin.py,sha256=htg9LmzsEsha_NgxhUlQNgDw_Cp7YJMImTQ5vlEoPxc,5040 +PIL/DdsImagePlugin.pyc,, +PIL/EpsImagePlugin.py,sha256=rd6jB8VYaL741fzOOtg7NSBHzv8N8_aL1zDHv8WZ5DM,12883 +PIL/EpsImagePlugin.pyc,, +PIL/ExifTags.py,sha256=-Auwh-hnhpOuBCo6SW5hEmToJYmbjat13TTG4OnF9Q4,8837 +PIL/ExifTags.pyc,, +PIL/FitsStubImagePlugin.py,sha256=-kjVbGTjhKuVgmvaGryFyXEA0LZm7WofH7f0yR9wiXI,1622 +PIL/FitsStubImagePlugin.pyc,, +PIL/FliImagePlugin.py,sha256=Ef4A0mCYbk2cW7jWzp9sfpCVOALgjs1RJeyCpi28rsM,4093 +PIL/FliImagePlugin.pyc,, +PIL/FontFile.py,sha256=r_I7yEY1GGfgIv0l2AhzLNAjROWTaliEzsjmAwFT0Kc,2840 +PIL/FontFile.pyc,, +PIL/FpxImagePlugin.py,sha256=iIJ8QqOp9GlHrdFNZgEdkzzOAybHvfCdHPbgk3eTiT8,6379 +PIL/FpxImagePlugin.pyc,, +PIL/FtexImagePlugin.py,sha256=lAkYhwcmJEwMoVQZkrul7zo45QsDXom-QwZoSRXIHRQ,3268 +PIL/FtexImagePlugin.pyc,, +PIL/GbrImagePlugin.py,sha256=cztw3cPdpEjzlQnWj8mnC2rZxUR4G65v2ZeXhGc17JE,2723 +PIL/GbrImagePlugin.pyc,, +PIL/GdImageFile.py,sha256=3lsM1dhyf_UlxFo-ERCQc5FQ3wg1ROh5o7veQ0JiZTs,2266 +PIL/GdImageFile.pyc,, +PIL/GifImagePlugin.py,sha256=x4pWwoNEZoMKU1LGUoJpbGCW4MUSgPfmPOeAc8UjTt0,26714 +PIL/GifImagePlugin.pyc,, +PIL/GimpGradientFile.py,sha256=zs7-vHAdVCInRQRDx2K5fBpofMlRR0GRKxGVmcUGMag,3345 +PIL/GimpGradientFile.pyc,, +PIL/GimpPaletteFile.py,sha256=1o3b3WhPCeUHz_9uWforK0iQ_OBeGoFMHnEsn9lfCjM,1339 +PIL/GimpPaletteFile.pyc,, +PIL/GribStubImagePlugin.py,sha256=Y2p_W8PTvx3OCnpsOD71NJceoJtWmgWy_acOt5wSV3E,1541 +PIL/GribStubImagePlugin.pyc,, +PIL/Hdf5StubImagePlugin.py,sha256=1nacrTpO0tNzbVvek0dvxRbAmlnxi8n-J6gds-WODEo,1515 +PIL/Hdf5StubImagePlugin.pyc,, +PIL/IcnsImagePlugin.py,sha256=DfSq14eRyR93Jkj1j4Sffjb31wxnV9QTyhvM3-cHSg8,10980 +PIL/IcnsImagePlugin.pyc,, +PIL/IcoImagePlugin.py,sha256=wNdwhS3j3lhjLoRJ9CpHwkmeK9rqJm7TW2AyjRd3b1M,9331 +PIL/IcoImagePlugin.pyc,, +PIL/ImImagePlugin.py,sha256=vIi4nDl6ISY8Ox7e8KNiQM9JaI_crFZEcvX-FqZhpLQ,10162 +PIL/ImImagePlugin.pyc,, +PIL/Image.py,sha256=GK2CZ54qxV29EGiY1MYGCVTp07-mD7SOBX4WSSjtKvU,96860 +PIL/Image.pyc,, +PIL/ImageChops.py,sha256=f07JlSm9Ln3-PPIM1-ylbchp29FSnOPunFyciLZyMyc,6182 +PIL/ImageChops.pyc,, +PIL/ImageCms.py,sha256=_oTgU2CwnkLyp1lJIxtMdSuYer-AsrEWnPXPO2N5sig,36616 +PIL/ImageCms.pyc,, +PIL/ImageColor.py,sha256=ctBZpa78H8kqxM8kqpT12c0tw0D812YWy-KtRl-mupA,8703 +PIL/ImageColor.pyc,, +PIL/ImageDraw.py,sha256=nuGekoQeLk6AhiNMUF4P3LwZRk_HHVTDH86EYxeZLic,13689 +PIL/ImageDraw.pyc,, +PIL/ImageDraw2.py,sha256=kpFXgNEmf2Yn16zy9TpU4_z7ekN5sEhlKmmTXwnC3eg,3127 +PIL/ImageDraw2.pyc,, +PIL/ImageEnhance.py,sha256=eQUg_Uzi6PbeIaghBmtc6r5pdLvevFAqbn6C_1fHL1w,3195 +PIL/ImageEnhance.pyc,, +PIL/ImageFile.py,sha256=y7tyTk--nl86ZBW0VpfIoOg1EhA2wk9rQ8J0vyBE4hA,20584 +PIL/ImageFile.pyc,, +PIL/ImageFilter.py,sha256=1-b1Vi8SfxgWZoO_jbwTaGnOFc2ExHFDwihFYBgLyFI,15383 +PIL/ImageFilter.pyc,, +PIL/ImageFont.py,sha256=Did8yfJWkazO1HqVqh14GMrfgS6Vgzq0SpjnWk50RYY,21369 +PIL/ImageFont.pyc,, 
+PIL/ImageGrab.py,sha256=q7wTv2h-t6-2KqKKVIhLg3lNTTAhlo45bErT368pGz8,2150 +PIL/ImageGrab.pyc,, +PIL/ImageMath.py,sha256=k3KvcxS5Vm07IWVnZqq0Fx8ceVB1MdsBlYHoLYrpDys,7463 +PIL/ImageMath.pyc,, +PIL/ImageMode.py,sha256=frHm-hptD7oCyWbawKtYsjb3NV8CHNXn8R0dyrAStAE,1558 +PIL/ImageMode.pyc,, +PIL/ImageMorph.py,sha256=bgj3maI0a2XIg7A3_K2H_5aN14Elwqu4038GKQqEwgI,8370 +PIL/ImageMorph.pyc,, +PIL/ImageOps.py,sha256=AIONtNN8RgARADWmBPoiwazH-gukMECsO0gv5yXUOv8,16106 +PIL/ImageOps.pyc,, +PIL/ImagePalette.py,sha256=pZ_GQHxMFNnlkcMFDbdto-Iz86FWgxQnyugfxRxWjSY,6321 +PIL/ImagePalette.pyc,, +PIL/ImagePath.py,sha256=IPUmk_1SdD5NjpZWuwDEDMrRIq_izWrhiczq7zFgLl8,337 +PIL/ImagePath.pyc,, +PIL/ImageQt.py,sha256=JC5fEcAn9q0r3bH36a9YgBQwyPIQ1fd8RXrYHoWLW2k,6321 +PIL/ImageQt.pyc,, +PIL/ImageSequence.py,sha256=fp7ziB8L6zhEXz8bTrU5sYCdGnZ7OzADEOsvCd37Vc4,1240 +PIL/ImageSequence.pyc,, +PIL/ImageShow.py,sha256=tH6tIj8jh__hOeAUpnRDgSiU15kfhaOaoc8cREe5OTU,5262 +PIL/ImageShow.pyc,, +PIL/ImageStat.py,sha256=UkQwWW3JBfk7HtGdUf3EyZOyHGNuhp1TNOUmgXgyB0Q,3848 +PIL/ImageStat.pyc,, +PIL/ImageTk.py,sha256=mWWvseOE7X7PW-v6rZoUK4vZ0LduZZ4HjEpnPPmiDx0,9360 +PIL/ImageTk.pyc,, +PIL/ImageTransform.py,sha256=3tSnRn747qaNC-8BaOC0T1CyeMJoaKUzpLEwCPKyHFs,2839 +PIL/ImageTransform.pyc,, +PIL/ImageWin.py,sha256=cH6bBrWyk42copvCfPILYhpTkdngxA1d8v1S7R9ol-Y,7217 +PIL/ImageWin.pyc,, +PIL/ImtImagePlugin.py,sha256=WDXZldcCJiB0-dwjzdmcjz2JkRFBaj4QROWHj_kY3Yg,2240 +PIL/ImtImagePlugin.pyc,, +PIL/IptcImagePlugin.py,sha256=n-ZTYv4LEQ3AuvhZpJlDdFnJEVEK3caBHEJ1hpD84SY,6798 +PIL/IptcImagePlugin.pyc,, +PIL/Jpeg2KImagePlugin.py,sha256=irT7mcxIMc1arvJcVPRadpmzimEn1XkWKomRKpvJbZE,7730 +PIL/Jpeg2KImagePlugin.pyc,, +PIL/JpegImagePlugin.py,sha256=OxIm8FHtQW0Eo8jJnVPtluAQEqkLd8wKC4x9TnxnjrY,27685 +PIL/JpegImagePlugin.pyc,, +PIL/JpegPresets.py,sha256=t9_TuyTIVp9RkXlIv3BVLEh7T1NZtVZwzzLpIlcJiMQ,12399 +PIL/JpegPresets.pyc,, +PIL/McIdasImagePlugin.py,sha256=MAyW-gPGI8Vpq5bcc4_Fd95DEwFH-zTfUZEe3A1lJ_0,1768 +PIL/McIdasImagePlugin.pyc,, +PIL/MicImagePlugin.py,sha256=X0tFPFlbaHeY4tI70AmzViUs781-u-ycrqHM7Hb9oKk,2460 +PIL/MicImagePlugin.pyc,, +PIL/MpegImagePlugin.py,sha256=1a3yu5UpkAjRRQu_BnSuX90s9doVulfc7va9OT0THQU,1831 +PIL/MpegImagePlugin.pyc,, +PIL/MpoImagePlugin.py,sha256=tLqF8CuTfi2l1ebgIzVeCsSEGwt0wBnWYPrTyXQxTRU,2982 +PIL/MpoImagePlugin.pyc,, +PIL/MspImagePlugin.py,sha256=HXn-fjPaOH1X2w_UCARNdxXKbvfPS0u5jT_z5B0orKw,5518 +PIL/MspImagePlugin.pyc,, +PIL/OleFileIO.py,sha256=EJ54RgQCUPoQjO0lDdoe34MeOd9IH_RwYH2GKpKYlPY,152 +PIL/OleFileIO.pyc,, +PIL/PSDraw.py,sha256=hQuLYYkxbTOL6fw4eomK_Rop0U0JWZIlljBwtpj_jes,6870 +PIL/PSDraw.pyc,, +PIL/PaletteFile.py,sha256=xnAUCKhUxSIeqqn--4DFPRaNtVxeqoNvYgdzq_7kidQ,1110 +PIL/PaletteFile.pyc,, +PIL/PalmImagePlugin.py,sha256=-4otkAWd40ykwzLq06CZ8QWb_bFZO_cLirtZ_ZeP-7s,9150 +PIL/PalmImagePlugin.pyc,, +PIL/PcdImagePlugin.py,sha256=kYXV7kOTRxFmlvqYvDjy_4qNeeZjAbfZe921H7RTmsI,1519 +PIL/PcdImagePlugin.pyc,, +PIL/PcfFontFile.py,sha256=BdhMHBLk_DMvnO6IAe_IdddpHwZRjXPEzTeh2oglylQ,6136 +PIL/PcfFontFile.pyc,, +PIL/PcxImagePlugin.py,sha256=2FrzOXe_GdSajFPBZAfoJxqs48AT8B4RABFD_JZ5FMI,5224 +PIL/PcxImagePlugin.pyc,, +PIL/PdfImagePlugin.py,sha256=JcB71T4CPNCLBzZGkJmGFqy64WktRIlU0xWxCrS4cfg,7392 +PIL/PdfImagePlugin.pyc,, +PIL/PdfParser.py,sha256=gRIHXRXnQRWRcTapFFw5ufGytISfQthRHwIFVnjKo1M,33203 +PIL/PdfParser.pyc,, +PIL/PixarImagePlugin.py,sha256=IOu9VzegyC5RKTCfF6OHEgAza-6PZRDa7w_dBWIB5tY,1674 +PIL/PixarImagePlugin.pyc,, +PIL/PngImagePlugin.py,sha256=3l7LdeEXTENzZ1M-Y_5Jn-Ipebn_haMH2oyY3kYhjsE,25677 +PIL/PngImagePlugin.pyc,, 
+PIL/PpmImagePlugin.py,sha256=PG_t0FvDZDgxnUVgWn46MDamjCDkYNn0SLVUnj_i2P4,4212 +PIL/PpmImagePlugin.pyc,, +PIL/PsdImagePlugin.py,sha256=hJfd4YVj7wmE0kE5TkhuSagLbx37fJNb39Uto3tO4Yo,7550 +PIL/PsdImagePlugin.pyc,, +PIL/PyAccess.py,sha256=rIH60zn4NcjZmlMp5sZowFTWLjzUqvooECNZa87u9WQ,8895 +PIL/PyAccess.pyc,, +PIL/SgiImagePlugin.py,sha256=FQ4ottrWgu_5KLosbLbjG_3yFXYMtDryBlyXTCLIz2c,6125 +PIL/SgiImagePlugin.pyc,, +PIL/SpiderImagePlugin.py,sha256=3xaZ5uJNTPaUVRnW6p8ynfAMvCPp2_uXg8Hdkphq76c,9341 +PIL/SpiderImagePlugin.pyc,, +PIL/SunImagePlugin.py,sha256=1CRfLHH_gYgUJTSGa4yjLgwi4XF9CC5MtVloYHN3vus,4320 +PIL/SunImagePlugin.pyc,, +PIL/TarIO.py,sha256=0f0geSoJ9B0PPcM0jMcCCfa94hwYDS6hth7FRNFwmpM,1239 +PIL/TarIO.pyc,, +PIL/TgaImagePlugin.py,sha256=e_J9Jw2HbfLPd3354Ji23LEl2xsprkBOuVe3y_kks-I,5351 +PIL/TgaImagePlugin.pyc,, +PIL/TiffImagePlugin.py,sha256=_N4cti2SZ2tTf8ST2ALUbrC3b3etvELpnoDQFOIabrw,65125 +PIL/TiffImagePlugin.pyc,, +PIL/TiffTags.py,sha256=sKmw5-orRfR_KiltQIlpGZi46kLA_o27PSfqk2gXLOY,14438 +PIL/TiffTags.pyc,, +PIL/WalImageFile.py,sha256=rKggtH36cF0a7NaiwrxfWS1Lq2zSoI9bJfGvW5LIax8,5558 +PIL/WalImageFile.pyc,, +PIL/WebPImagePlugin.py,sha256=0hv4aLLMkiw9rC0tChuT-m1Bw0FNuCaEx4xgghStJYQ,9674 +PIL/WebPImagePlugin.pyc,, +PIL/WmfImagePlugin.py,sha256=eufTX7sE4UlGvbweIPCkq6UbV1iy8rZQN1mOMhIgWXQ,4267 +PIL/WmfImagePlugin.pyc,, +PIL/XVThumbImagePlugin.py,sha256=IFiuUbhqOdBH_5B8smM2aB29SDSe815JWwiXef-k_rU,1959 +PIL/XVThumbImagePlugin.pyc,, +PIL/XbmImagePlugin.py,sha256=uYemmzX39rtW_-XApHIr4so4qVkmlzdr-wMZiZaEV7s,2504 +PIL/XbmImagePlugin.pyc,, +PIL/XpmImagePlugin.py,sha256=5HtNdyiNpIVtjsjhTOaWTq038HbScPojyPBf-vRQrFQ,3102 +PIL/XpmImagePlugin.pyc,, +PIL/__init__.py,sha256=P5K0X-uz8-kB0YTvWmC-WWkZ6D8woM6_BOLRvXV90os,2167 +PIL/__init__.pyc,, +PIL/_binary.py,sha256=Wy_0dJYiAzbkg7rKBi2e9os-UsPqcgPFMaGUW1x7vNE,1822 +PIL/_binary.pyc,, +PIL/_imaging.so,sha256=4HOgx8nOgV1pVps_zf-IyByeanRWz6p28vvblyHsIMs,590992 +PIL/_imagingcms.so,sha256=SNkNkSbP7RvcD1JjBGbdbM7p-EZLbB5jEwvfiq07IFU,45080 +PIL/_imagingft.so,sha256=YNDZVHwFZhiwGpWNq01jRnHLqYvdpnZUkGyUGQcF1sM,35776 +PIL/_imagingmath.so,sha256=nvqCOk3TGnndpiaZdj-TGmjAvbtKYZGvu4cXVbPmJRk,24856 +PIL/_imagingmorph.so,sha256=7WMh1EmPLH8YOQCYwlRgkM2DwwHzB6lma9wF0CYq_DM,8056 +PIL/_imagingtk.so,sha256=SgF4SvI046T1er-Qj3MXm1jicq5g-utlaJjAl_PhbNQ,8984 +PIL/_tkinter_finder.py,sha256=OxAeW-nXH-BLvlWO-YjwMtaTG33_UQ5kmR3IbtyMAH8,702 +PIL/_tkinter_finder.pyc,, +PIL/_util.py,sha256=285thA2BR8MOP5fUupc3rOAF56Imy50uqkEH44GxqDM,584 +PIL/_util.pyc,, +PIL/_version.py,sha256=AgUEioJ4MrtghIObrh6qtCts3TpNe_R2tzkLf1rMQR4,50 +PIL/_version.pyc,, +PIL/_webp.so,sha256=-WE0UK4GkeCpN36__USQY5kXqjN4OaKGcizPzh1QNzs,39328 +PIL/features.py,sha256=9D3LoufNcZvNvp8ExVAqdequM0vD1LF_puH_byd3h38,1874 +PIL/features.pyc,, +Pillow-5.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Pillow-5.2.0.dist-info/LICENSE.txt,sha256=SWD8GL7MKGvoOcj_F71wzVIiUz92ESkUIVNENxiiUZk,1452 +Pillow-5.2.0.dist-info/METADATA,sha256=BaSj0IUHC5xF-YKLAqQAOj74Z_T33akeoIAEaO4eJho,4109 +Pillow-5.2.0.dist-info/RECORD,, +Pillow-5.2.0.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110 +Pillow-5.2.0.dist-info/top_level.txt,sha256=riZqrk-hyZqh5f1Z0Zwii3dKfxEsByhu9cU9IODF-NY,4 +Pillow-5.2.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/WHEEL new file mode 100644 index 
0000000000000000000000000000000000000000..295a0ca5470911d5713ce5907980a0875ec667ff --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.1) +Root-Is-Purelib: false +Tag: cp27-cp27mu-manylinux1_x86_64 + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..b338169ce0c740c335bfe82912227ae8637bd492 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +PIL diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/zip-safe b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Pillow-5.2.0.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000000000000000000000000000000000000..675f08d10a5cab38d45faac2e7a324e14d79071b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,80 @@ +Werkzeug +======== + +Werkzeug is a comprehensive `WSGI`_ web application library. It began as +a simple collection of various utilities for WSGI applications and has +become one of the most advanced WSGI utility libraries. + +It includes: + +* An interactive debugger that allows inspecting stack traces and source + code in the browser with an interactive interpreter for any frame in + the stack. +* A full-featured request object with objects to interact with headers, + query args, form data, files, and cookies. +* A response object that can wrap other WSGI applications and handle + streaming data. +* A routing system for matching URLs to endpoints and generating URLs + for endpoints, with an extensible system for capturing variables from + URLs. +* HTTP utilities to handle entity tags, cache control, dates, user + agents, cookies, files, and more. +* A threaded WSGI server for use while developing applications locally. +* A test client for simulating HTTP requests during testing without + requiring running a server. + +Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up +to the developer to choose a template engine, database adapter, and even +how to handle requests. It can be used to build all sorts of end user +applications such as blogs, wikis, or bulletin boards. + +`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while +providing more structure and patterns for defining powerful +applications. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Werkzeug + + +A Simple Example +---------------- + +.. 
code-block:: python + + from werkzeug.wrappers import Request, Response + + @Request.application + def application(request): + return Response('Hello, World!') + + if __name__ == '__main__': + from werkzeug.serving import run_simple + run_simple('localhost', 4000, application) + + +Links +----- + +* Website: https://www.palletsprojects.com/p/werkzeug/ +* Releases: https://pypi.org/project/Werkzeug/ +* Code: https://github.com/pallets/werkzeug +* Issue tracker: https://github.com/pallets/werkzeug/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/werkzeug + * Windows: https://ci.appveyor.com/project/davidism/werkzeug + +* Test coverage: https://codecov.io/gh/pallets/werkzeug + +.. _WSGI: https://wsgi.readthedocs.io/en/latest/ +.. _Flask: https://www.palletsprojects.com/p/flask/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..1cc75bb0e33350dedca65a87d16333ff9a4e8471 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright © 2007 by the Pallets team. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. 
diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..bfc3c4e89623744f7492d19623c8e87130b28c7e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/METADATA @@ -0,0 +1,116 @@ +Metadata-Version: 2.0 +Name: Werkzeug +Version: 0.14.1 +Summary: The comprehensive WSGI web application library. +Home-page: https://www.palletsprojects.org/p/werkzeug/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description-Content-Type: UNKNOWN +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: dev +Requires-Dist: coverage; extra == 'dev' +Requires-Dist: pytest; extra == 'dev' +Requires-Dist: sphinx; extra == 'dev' +Requires-Dist: tox; extra == 'dev' +Provides-Extra: termcolor +Requires-Dist: termcolor; extra == 'termcolor' +Provides-Extra: watchdog +Requires-Dist: watchdog; extra == 'watchdog' + +Werkzeug +======== + +Werkzeug is a comprehensive `WSGI`_ web application library. It began as +a simple collection of various utilities for WSGI applications and has +become one of the most advanced WSGI utility libraries. + +It includes: + +* An interactive debugger that allows inspecting stack traces and source + code in the browser with an interactive interpreter for any frame in + the stack. +* A full-featured request object with objects to interact with headers, + query args, form data, files, and cookies. +* A response object that can wrap other WSGI applications and handle + streaming data. +* A routing system for matching URLs to endpoints and generating URLs + for endpoints, with an extensible system for capturing variables from + URLs. +* HTTP utilities to handle entity tags, cache control, dates, user + agents, cookies, files, and more. +* A threaded WSGI server for use while developing applications locally. +* A test client for simulating HTTP requests during testing without + requiring running a server. + +Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up +to the developer to choose a template engine, database adapter, and even +how to handle requests. It can be used to build all sorts of end user +applications such as blogs, wikis, or bulletin boards. + +`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while +providing more structure and patterns for defining powerful +applications. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U Werkzeug + + +A Simple Example +---------------- + +.. 
code-block:: python + + from werkzeug.wrappers import Request, Response + + @Request.application + def application(request): + return Response('Hello, World!') + + if __name__ == '__main__': + from werkzeug.serving import run_simple + run_simple('localhost', 4000, application) + + +Links +----- + +* Website: https://www.palletsprojects.com/p/werkzeug/ +* Releases: https://pypi.org/project/Werkzeug/ +* Code: https://github.com/pallets/werkzeug +* Issue tracker: https://github.com/pallets/werkzeug/issues +* Test status: + + * Linux, Mac: https://travis-ci.org/pallets/werkzeug + * Windows: https://ci.appveyor.com/project/davidism/werkzeug + +* Test coverage: https://codecov.io/gh/pallets/werkzeug + +.. _WSGI: https://wsgi.readthedocs.io/en/latest/ +.. _Flask: https://www.palletsprojects.com/p/flask/ +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..450d790dfd5b1a4d68d0e1c6dd05b3f7d95f8a00 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/RECORD @@ -0,0 +1,97 @@ +Werkzeug-0.14.1.dist-info/DESCRIPTION.rst,sha256=rOCN36jwsWtWsTpqPG96z7FMilB5qI1CIARSKRuUmz8,2452 +Werkzeug-0.14.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Werkzeug-0.14.1.dist-info/LICENSE.txt,sha256=xndz_dD4m269AF9l_Xbl5V3tM1N3C1LoZC2PEPxWO-8,1534 +Werkzeug-0.14.1.dist-info/METADATA,sha256=FbfadrPdJNUWAxMOKxGUtHe5R3IDSBKYYmAz3FvI3uY,3872 +Werkzeug-0.14.1.dist-info/RECORD,, +Werkzeug-0.14.1.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +Werkzeug-0.14.1.dist-info/metadata.json,sha256=4489UTt6HBp2NQil95-pBkjU4Je93SMHvMxZ_rjOpqA,1452 +Werkzeug-0.14.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 +werkzeug/__init__.py,sha256=NR0d4n_-U9BLVKlOISean3zUt2vBwhvK-AZE6M0sC0k,6842 +werkzeug/__init__.pyc,, +werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311 +werkzeug/_compat.pyc,, +werkzeug/_internal.py,sha256=GhEyGMlsSz_tYjsDWO9TG35VN7304MM8gjKDrXLEdVc,13873 +werkzeug/_internal.pyc,, +werkzeug/_reloader.py,sha256=AyPphcOHPbu6qzW0UbrVvTDJdre5WgpxbhIJN_TqzUc,9264 +werkzeug/_reloader.pyc,, +werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623 +werkzeug/contrib/__init__.pyc,, +werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575 +werkzeug/contrib/atom.pyc,, +werkzeug/contrib/cache.py,sha256=xBImHNj09BmX_7kC5NUCx8f_l4L8_O7zi0jCL21UZKE,32163 +werkzeug/contrib/cache.pyc,, +werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179 +werkzeug/contrib/fixers.pyc,, +werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814 +werkzeug/contrib/iterio.pyc,, +werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564 +werkzeug/contrib/jsrouting.pyc,, +werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334 +werkzeug/contrib/limiter.pyc,, +werkzeug/contrib/lint.py,sha256=Mj9NeUN7s4zIUWeQOAVjrmtZIcl3Mm2yDe9BSIr9YGE,12558 +werkzeug/contrib/lint.pyc,, +werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151 +werkzeug/contrib/profiler.pyc,, +werkzeug/contrib/securecookie.py,sha256=uWMyHDHY3lkeBRiCSayGqWkAIy4a7xAbSE_Hln9ecqc,12196 
+werkzeug/contrib/securecookie.pyc,, +werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577 +werkzeug/contrib/sessions.pyc,, +werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453 +werkzeug/contrib/testtools.pyc,, +werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398 +werkzeug/contrib/wrappers.pyc,, +werkzeug/datastructures.py,sha256=3IgNKNqrz-ZjmAG7y3YgEYK-enDiMT_b652PsypWcYg,90080 +werkzeug/datastructures.pyc,, +werkzeug/debug/__init__.py,sha256=uSn9BqCZ5E3ySgpoZtundpROGsn-uYvZtSFiTfAX24M,17452 +werkzeug/debug/__init__.pyc,, +werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607 +werkzeug/debug/console.pyc,, +werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340 +werkzeug/debug/repr.pyc,, +werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 +werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 +werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264 +werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957 +werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 +werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 +werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 +werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270 +werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 +werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451 +werkzeug/debug/tbtools.pyc,, +werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505 +werkzeug/exceptions.pyc,, +werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175 +werkzeug/filesystem.pyc,, +werkzeug/formparser.py,sha256=mUuCwjzjb8_E4RzrAT2AioLuZSYpqR1KXTK6LScRYzA,21722 +werkzeug/formparser.pyc,, +werkzeug/http.py,sha256=RQg4MJuhRv2isNRiEh__Phh09ebpfT3Kuu_GfrZ54_c,40079 +werkzeug/http.pyc,, +werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553 +werkzeug/local.pyc,, +werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519 +werkzeug/posixemulation.pyc,, +werkzeug/routing.py,sha256=2JVtdSgxKGeANy4Z_FP-dKESvKtkYGCZ1J2fARCLGCY,67214 +werkzeug/routing.pyc,, +werkzeug/script.py,sha256=DwaVDcXdaOTffdNvlBdLitxWXjKaRVT32VbhDtljFPY,11365 +werkzeug/script.pyc,, +werkzeug/security.py,sha256=0m107exslz4QJLWQCpfQJ04z3re4eGHVggRvrQVAdWc,9193 +werkzeug/security.pyc,, +werkzeug/serving.py,sha256=A0flnIJHufdn2QJ9oeuHfrXwP3LzP8fn3rNW6hbxKUg,31926 +werkzeug/serving.pyc,, +werkzeug/test.py,sha256=XmECSmnpASiYQTct4oMiWr0LT5jHWCtKqnpYKZd2ui8,36100 +werkzeug/test.pyc,, +werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396 +werkzeug/testapp.pyc,, +werkzeug/urls.py,sha256=dUeLg2IeTm0WLmSvFeD4hBZWGdOs-uHudR5-t8n9zPo,36771 +werkzeug/urls.pyc,, +werkzeug/useragents.py,sha256=BhYMf4cBTHyN4U0WsQedePIocmNlH_34C-UwqSThGCc,5865 +werkzeug/useragents.pyc,, +werkzeug/utils.py,sha256=BrY1j0DHQ8RTb0K1StIobKuMJhN9SQQkWEARbrh2qpk,22972 +werkzeug/utils.pyc,, +werkzeug/websocket.py,sha256=PpSeDxXD_0UsPAa5hQhQNM6mxibeUgn8lA8eRqiS0vM,11344 +werkzeug/websocket.pyc,, +werkzeug/wrappers.py,sha256=kbyL_aFjxELwPgMwfNCYjKu-CR6kNkh-oO8wv3GXbk8,84511 +werkzeug/wrappers.pyc,, 
+werkzeug/wsgi.py,sha256=1Nob-aeChWQf7MsiicO8RZt6J90iRzEcik44ev9Qu8s,49347 +werkzeug/wsgi.pyc,, diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..0de529b1ed2b97087004d395ee76302519a5f5a6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.26.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/metadata.json b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..bca8d1262af63728cd01e54423a3bf951a050bf6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "The comprehensive WSGI web application library.", "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"project_urls": {"Home": "https://www.palletsprojects.org/p/werkzeug/"}, "contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}}}, "license": "BSD", "metadata_version": "2.0", "name": "Werkzeug", "platform": "any", "extras": ["dev", "termcolor", "watchdog"], "run_requires": [{"requires": ["coverage", "pytest", "sphinx", "tox"], "extra": "dev"}, {"requires": ["termcolor"], "extra": "termcolor"}, {"requires": ["watchdog"], "extra": "watchdog"}], "version": "0.14.1"} \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fe8da8499399d7c0484847967ad49e4b165589b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt @@ -0,0 +1 @@ +werkzeug diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46c7827ed5ec62b4613bf31d8331f731a24d34a9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.py @@ -0,0 +1,8 @@ +__all__ = ["__version__"] + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + # unknown only 
works because we do poor mans version compare + __version__ = "unknown" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f39212f0ba7199aba92d14f3d2e6b5639ebabd5d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000000000000000000000000000000000000..455c3a7be086b645f502ad8edba57f18b6c7d3da --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,109 @@ +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +from glob import glob + + +class FastFilesCompleter(object): + "Fast file completer class" + + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # we are on unix, otherwise no bash + if not prefix or prefix[-1] == os.path.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser, always_complete_options=False) + + +else: + + def try_argcomplete(parser): + pass + + filescompleter = None diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc9eba81f4970998df56aa879300dc9a32cd6db1 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_argcomplete.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe755a39911bcad9a830d54e4d797b10e43703c3 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,14 @@ +""" python inspection/code generation API """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from .code import Code # noqa +from .code import ExceptionInfo # noqa +from .code import filter_traceback # noqa +from .code import Frame # noqa +from .code import getrawcode # noqa +from .code import Traceback # noqa +from .source import compile_ as compile # noqa +from .source import getfslineno # noqa +from .source import Source # noqa diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e692334301aa20f4122b9c7e5f99eb0aeea5cb4d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8b7bc000a7ff4570ea7113b947c2cf9fc12a8f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.py @@ -0,0 +1,94 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +from __future__ import absolute_import 
+from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import types + +from six import text_type + + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if ( + isinstance(etype, BaseException) + or isinstance(etype, types.InstanceType) + or etype is None + or type(etype) is str + ): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. + lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "{}", line {}\n'.format(filename, lineno)) + if badline is not None: + if isinstance(badline, bytes): # python 2 only + badline = badline.decode("utf-8", "replace") + lines.append(" {}\n".format(badline.strip())) + if offset is not None: + caretspace = badline.rstrip("\n")[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or " ") for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(" {}^\n".format("".join(caretspace))) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "{}\n".format(etype) + else: + line = "{}: {}\n".format(etype, valuestr) + return line + + +def _some_str(value): + try: + return text_type(value) + except Exception: + try: + return bytes(value).decode("UTF-8", "replace") + except Exception: + pass + return "".format(type(value).__name__) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3eb697de6b04b11c24b939249c52421a7b3b22d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/_py2traceback.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000000000000000000000000000000000000..d06e24f006cc74796126fdc0b17896a307537abf --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.py @@ -0,0 +1,1052 @@ +from __future__ import absolute_import +from __future__ import 
division +from __future__ import print_function + +import inspect +import pprint +import re +import sys +import traceback +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from weakref import ref + +import attr +import pluggy +import py +import six +from six import text_type + +import _pytest +from _pytest.compat import _PY2 +from _pytest.compat import _PY3 +from _pytest.compat import PY35 +from _pytest.compat import safe_str + +builtin_repr = repr + +if _PY3: + from traceback import format_exception_only +else: + from ._py2traceback import format_exception_only + + +class Code(object): + """ wrapper around Python code objects """ + + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" % (rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + __hash__ = None + + def __ne__(self, other): + return not self == other + + @property + def path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + try: + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + raise OSError("py.path check failed.") + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + p = self.raw.co_filename + + return p + + @property + def fullsource(self): + """ return a _pytest._code.Source object for the full source file of the code + """ + from _pytest._code import source + + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a _pytest._code.Source object for the code object's source only + """ + # return source only for that part of code + import _pytest._code + + return _pytest._code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + import _pytest._code + + if self.code.fullsource is None: + return _pytest._code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + six.exec_(code, self.f_globals, f_locals) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string 
repr for 'object' + """ + return py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry, excinfo=None): + self._excinfo = excinfo + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + import _pytest._code + + return _pytest._code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self): + """ _pytest._code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + + locals = property(getlocals, None, None, "locals of underlaying frame") + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from _pytest._code.source import getstatementrange_ast + + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + mostly for internal use + """ + try: + tbh = self.frame.f_locals["__tracebackhide__"] + except KeyError: + try: + tbh = self.frame.f_globals["__tracebackhide__"] + except KeyError: + return False + + if callable(tbh): + return tbh(None if self._excinfo is None else self._excinfo()) + else: + return tbh + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = "???" + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: # noqa + line = "???" + return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) + + def name(self): + return self.frame.code.raw.co_name + + name = property(name, None, None, "co_name of underlaying code") + + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. 
+ """ + + Entry = TracebackEntry + + def __init__(self, tb, excinfo=None): + """ initialize from given python traceback object and ExceptionInfo """ + self._excinfo = excinfo + if hasattr(tb, "tb_next"): + + def f(cur): + while cur is not None: + yield self.Entry(cur, excinfo=excinfo) + cur = cur.tb_next + + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ( + (path is None or codepath == path) + and ( + excludepath is None + or not hasattr(codepath, "relto") + or not codepath.relto(excludepath) + ) + and (lineno is None or x.lineno == lineno) + and (firstlineno is None or x.frame.code.firstlineno == firstlineno) + ): + return Traceback(x._rawentry, self._excinfo) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackEntries which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self), self._excinfo) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self) - 1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackEntry where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: + f = entry.frame + loc = f.f_locals + for otherloc in values: + if f.is_true( + f.eval( + co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc, + ) + ): + return i + values.append(entry.frame.f_locals) + return None + + +co_equal = compile( + "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" +) + + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+ """ + + _striptext = "" + _assert_start_repr = ( + "AssertionError(u'assert " if _PY2 else "AssertionError('assert " + ) + + def __init__(self, tup=None, exprinfo=None): + import _pytest._code + + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], "msg", None) + if exprinfo is None: + exprinfo = py.io.saferepr(tup[1]) + if exprinfo and exprinfo.startswith(self._assert_start_repr): + self._striptext = "AssertionError: " + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (_pytest._code.Traceback instance) + self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + _pytest._code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno + 1, exconly) + + def getrepr( + self, + showlocals=False, + style="long", + abspath=False, + tbfilter=True, + funcargs=False, + truncate_locals=True, + chain=True, + ): + """ + Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: long|short|no|native traceback style + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param bool tbfilter: + Hide entries that contain a local variable ``__tracebackhide__==True``. + Ignored if ``style=="native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool chain: if chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. 
+ """ + if style == "native": + return ReprExceptionInfo( + ReprTracebackNative( + traceback.format_exception( + self.type, self.value, self.traceback[0]._rawentry + ) + ), + self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return text_type(loc) + + def match(self, regexp): + """ + Match the regular expression 'regexp' on the string representation of + the exception. If it matches then True is returned (so that it is + possible to write 'assert excinfo.match()'). If it doesn't match an + AssertionError is raised. + """ + __tracebackhide__ = True + if not re.search(regexp, str(self.value)): + assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value) + return True + + +@attr.s +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + showlocals = attr.ib(default=False) + style = attr.ib(default="long") + abspath = attr.ib(default=True) + tbfilter = attr.ib(default=True) + funcargs = attr.ib(default=False) + truncate_locals = attr.ib(default=True) + chain = attr.ib(default=True) + astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except: # noqa + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: # noqa + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. 
""" + import _pytest._code + + lines = [] + if source is None or line_index >= len(source.lines): + source = _pytest._code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. + if self.truncate_locals: + str_repr = self._saferepr(value) + else: + str_repr = pprint.pformat(value) + # if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" % (name, str_repr)) + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + import _pytest._code + + source = self._getentrysource(entry) + if source is None: + source = _pytest._code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" % (entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + + if is_recursion_error(excinfo): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + last = traceback[-1] + entries = [] + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) 
+ return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback(self, traceback): + """ + Truncate the given recursive traceback trying to find the starting point + of the recursion. + + The detection is done by going through each traceback entry and finding the + point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. + + Handle the situation where the recursion process might raise an exception (for example + comparing numpy arrays using equality raises a TypeError), in which case we do our best to + warn the user of the error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=safe_str(e), + max_frames=max_frames, + total=len(traceback), + ) + traceback = traceback[:max_frames] + traceback[-max_frames:] + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo): + if _PY2: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + + return ReprExceptionInfo(reprtraceback, reprcrash) + else: + repr_chain = [] + e = excinfo.value + descr = None + seen = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + if excinfo: + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + else: + # fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + + repr_chain += [(reprtraceback, reprcrash, descr)] + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None + and not e.__suppress_context__ + and self.chain + ): + e = e.__context__ + excinfo = ( + ExceptionInfo((type(e), e, e.__traceback__)) + if e.__traceback__ + else None + ) + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +class TerminalRepr(object): + def __str__(self): + s = self.__unicode__() + if _PY2: + s = s.encode("utf-8") + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. 
+ io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" % (self.__class__, id(self)) + + +class ExceptionRepr(TerminalRepr): + def __init__(self): + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +class ExceptionChainRepr(ExceptionRepr): + def __init__(self, chain): + super(ExceptionChainRepr, self).__init__() + self.chain = chain + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain + self.reprtraceback = chain[-1][0] + self.reprcrash = chain[-1][1] + + def toterminal(self, tw): + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super(ExceptionChainRepr, self).toterminal(tw) + + +class ReprExceptionInfo(ExceptionRepr): + def __init__(self, reprtraceback, reprcrash): + super(ReprExceptionInfo, self).__init__() + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + super(ReprExceptionInfo, self).toterminal(tw) + + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + # tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + # tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc) + + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = 
msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(":%s: %s" % (self.lineno, msg)) + + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" % (safe_str(name), safe_str(value)) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getrawcode(obj, trycall=True): + """ return code object for given function. """ + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, "im_func", obj) + obj = getattr(obj, "func_code", obj) + obj = getattr(obj, "f_code", obj) + obj = getattr(obj, "__code__", obj) + if trycall and not hasattr(obj, "co_firstlineno"): + if hasattr(obj, "__call__") and not inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, "co_firstlineno"): + return x + return obj + + +if PY35: # RecursionError introduced in 3.5 + + def is_recursion_error(excinfo): + return excinfo.errisinstance(RecursionError) # noqa + + +else: + + def is_recursion_error(excinfo): + if not excinfo.errisinstance(RuntimeError): + return False + try: + return "maximum recursion depth exceeded" in str(excinfo.value) + except UnicodeError: + return False + + +# relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance + +_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.basename == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.dirpath() +_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath() +_PY_DIR = py.path.local(py.__file__).dirpath() + + +def filter_traceback(entry): + """Return True if a TracebackEntry instance should be removed from tracebacks: + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code + # see https://bitbucket.org/pytest-dev/py/issues/71 + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + # entry.path might point to a non-existing file, in which case it will + # also return a str object. 
see #1133 + p = py.path.local(entry.path) + return ( + not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR) + ) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0285c334a79189045d431ac515c5250dc0e248c3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/code.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000000000000000000000000000000000000..072ddb1b8b81bdb9b0b5c73bbee9c19c76690b0b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.py @@ -0,0 +1,325 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast +import inspect +import linecache +import sys +import textwrap +import tokenize +from ast import PyCF_ONLY_AST as _AST_FLAG +from bisect import bisect_right + +import py +import six + +cpy_compile = compile + + +class Source(object): + """ an immutable object holding a source code fragment, + possibly deindenting it. + """ + + _compilecounter = 0 + + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get("deindent", True) + for part in parts: + if not part: + partlines = [] + elif isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, six.string_types): + partlines = part.split("\n") + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + __hash__ = None + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __len__(self): + return len(self.lines) + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before="", after="", indent=" " * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [(indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=" " * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno): + """ return Source statement which contains the + given linenumber (counted from 0). 
+ """ + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self): + """return a new source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. + """ + from parser import suite as syntax_checker + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + # compile(source+'\n', "x", "exec") + syntax_checker(source + "\n") + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile( + self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None + ): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + "%s:%d>" % (fn, lineno) + else: + filename = base + "%r %s:%d>" % (filename, fn, lineno) + source = "\n".join(self.lines) + "\n" + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[: ex.lineno] + if ex.offset: + msglines.append(" " * ex.offset + "^") + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError("\n".join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + linecache.cache[filename] = (1, None, lines, filename) + return co + + +# +# public API shortcut functions +# + + +def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if isinstance(source, ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. 
+ If the source cannot be determined return ("", -1) + """ + from .code import Code + + try: + code = Code(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + + +# +# helper functions +# + + +def findsource(obj): + try: + sourcelines, lineno = inspect.findsource(obj) + except py.builtin._sysex: + raise + except: # noqa + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getsource(obj, **kwargs): + from .code import getrawcode + + obj = getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = '"Buggy python version consider upgrading, cannot get source"' + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + + +def deindent(lines): + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno, node): + import ast + + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + values = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.pyc new file mode 100644 index 0000000000000000000000000000000000000000..697aa88e987a16e12f9fcc509d7c9fdd8e7f14d9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_code/source.pyc differ diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..add38674cde6dac11895e0df7edabd2b0f0a39e4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '3.9.3' diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22e3b4b7354b51dc377fe9482162965e6cc0c250 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/_version.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c846c2c00279d90855e4630485ed432a2c600f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,155 @@ +""" +support for presenting detailed information in failing assertions. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +import six + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help="""Control assertion debugging tools. 'plain' + performs no assertion debugging. 'rewrite' + (the default) rewrites assert statements in + test modules on import to provide assert + expression information.""", + ) + + +def register_assert_rewrite(*names): + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :raise TypeError: if the given module names are not strings. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + importhook = DummyRewriteHook() + importhook.mark_rewrite(*names) + + +class DummyRewriteHook(object): + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names): + pass + + +class AssertionState(object): + """State for the assertion plugin.""" + + def __init__(self, config, mode): + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook = None + + +def install_importhook(config): + """Try to install the rewrite hook, raise SystemError if it fails.""" + # Jython has an AST bug that make the assertion rewriting hook malfunction. 
+ if sys.platform.startswith("java"): + raise SystemError("rewrite not supported") + + config._assertstate = AssertionState(config, "rewrite") + config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config._assertstate.trace("installed rewrite import hook") + + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session): + # this hook is only called when test modules are collected + # so for example not in the master process of pytest-xdist + # (which does not collect test modules) + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +def pytest_runtest_setup(item): + """Setup the pytest_assertrepr_compare hook + + The newinterpret and rewrite modules will use util._reprcompare if + it exists to use custom reporting via the + pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + + def callbinrepr(op, left, right): + """Call the pytest_assertrepr_compare hook and prepare the result + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. + """ + hook_result = item.ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = six.text_type("\n~").join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + + util._reprcompare = callbinrepr + + +def pytest_runtest_teardown(item): + util._reprcompare = None + + +def pytest_sessionfinish(session): + assertstate = getattr(session.config, "_assertstate", None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +# Expose this plugin's implementation for the pytest_assertrepr_compare hook +pytest_assertrepr_compare = util.assertrepr_compare diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fb94cb1b969e40f93494a2f0fef22408850aa24 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000000000000000000000000000000000000..ecb24ff7ca8904f6ce103c28e368412cce22e5da --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1035 @@ +"""Rewrite assertion AST to produce nice error messages""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import ast 
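# Illustrative sketch (not part of the vendored file above): the
# register_assert_rewrite() function defined in assertion/__init__.py is also
# exposed as pytest.register_assert_rewrite; a plugin package would call it
# from its own __init__.py before importing the modules whose asserts should
# be rewritten.  The package name "myplugin" is hypothetical.
import pytest

# must run before "myplugin.helpers" is imported anywhere, so that its
# assert statements are rewritten and produce detailed failure messages
pytest.register_assert_rewrite("myplugin.helpers")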
+import errno +import imp +import itertools +import marshal +import os +import re +import string +import struct +import sys +import types + +import atomicwrites +import py +import six + +from _pytest.assertion import util +from _pytest.compat import spec_from_file_location +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import PurePath + +# pytest caches rewritten pycs in __pycache__. +if hasattr(imp, "get_tag"): + PYTEST_TAG = imp.get_tag() + "-PYTEST" +else: + if hasattr(sys, "pypy_version_info"): + impl = "pypy" + elif sys.platform == "java": + impl = "jython" + else: + impl = "cpython" + ver = sys.version_info + PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) + del ver, impl + +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." + PYTEST_TAG + PYC_EXT + +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 + +if sys.version_info >= (3, 5): + ast_Call = ast.Call +else: + + def ast_Call(a, b, c): + return ast.Call(a, b, c, None, None) + + +class AssertionRewritingHook(object): + """PEP302 Import hook which rewrites asserts.""" + + def __init__(self, config): + self.config = config + self.fnpats = config.getini("python_files") + self.session = None + self.modules = {} + self._rewritten_names = set() + self._register_with_pkg_resources() + self._must_rewrite = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache = {} + self._session_paths_checked = False + + def set_session(self, session): + self.session = session + self._session_paths_checked = False + + def _imp_find_module(self, name, path=None): + """Indirection so we can mock calls to find_module originated from the hook during testing""" + return imp.find_module(name, path) + + def find_module(self, name, path=None): + if self._writing_pyc: + return None + state = self.config._assertstate + if self._early_rewrite_bailout(name, state): + return None + state.trace("find_module called for: %s" % name) + names = name.rsplit(".", 1) + lastname = names[-1] + pth = None + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] + if pth is None: + try: + fd, fn, desc = self._imp_find_module(lastname, path) + except ImportError: + return None + if fd is not None: + fd.close() + tp = desc[2] + if tp == imp.PY_COMPILED: + if hasattr(imp, "source_from_cache"): + try: + fn = imp.source_from_cache(fn) + except ValueError: + # Python 3 doesn't like orphaned but still-importable + # .pyc files. + fn = fn[:-1] + else: + fn = fn[:-1] + elif tp != imp.PY_SOURCE: + # Don't know what this is. + return None + else: + fn = os.path.join(pth, name.rpartition(".")[2] + ".py") + + fn_pypath = py.path.local(fn) + if not self._should_rewrite(name, fn_pypath, state): + return None + + self._rewritten_names.add(name) + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. 
Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") + if write: + try: + os.mkdir(cache_dir) + except OSError: + e = sys.exc_info()[1].errno + if e == errno.EEXIST: + # Either the __pycache__ directory already exists (the + # common case) or it's blocked by a non-dir node. In the + # latter case, we'll ignore it in _write_pyc. + pass + elif e in [errno.ENOENT, errno.ENOTDIR]: + # One of the path components was not a directory, likely + # because we're in a zip file. + write = False + elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: + state.trace("read only directory: %r" % fn_pypath.dirname) + write = False + else: + raise + cache_name = fn_pypath.basename[:-3] + PYC_TAIL + pyc = os.path.join(cache_dir, cache_name) + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn_pypath, pyc, state.trace) + if co is None: + state.trace("rewriting %r" % (fn,)) + source_stat, co = _rewrite_test(self.config, fn_pypath) + if co is None: + # Probably a SyntaxError in the test. + return None + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace("found cached rewritten pyc for %r" % (fn,)) + self.modules[name] = co, pyc + return self + + def _early_rewrite_bailout(self, name, state): + """ + This is a fast way to get out of rewriting modules. Profiling has + shown that the call to imp.find_module (inside of the find_module + from this class) is a major slowdown, so, this method tries to + filter what we're sure won't be rewritten before getting to it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(path).split(os.path.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. 
+ path = PurePath(os.path.sep.join(parts) + ".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace("early skip of rewriting module: %s" % (name,)) + return True + + def _should_rewrite(self, name, fn_pypath, state): + # always rewrite conftest files + fn = str(fn_pypath) + if fn_pypath.basename == "conftest.py": + state.trace("rewriting conftest file: %r" % (fn,)) + return True + + if self.session is not None: + if self.session.isinitpath(fn): + state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + for pat in self.fnpats: + if fn_pypath.fnmatch(pat): + state.trace("matched test file %r" % (fn,)) + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name, state): + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace("matched marked file %r (from %r)" % (name, marked)) + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names): + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + if not AssertionRewriter.is_rewrite_disabled( + sys.modules[name].__doc__ or "" + ): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name): + from _pytest.warning_types import PytestWarning + from _pytest.warnings import _issue_config_warning + + _issue_config_warning( + PytestWarning("Module already imported so cannot be rewritten: %s" % name), + self.config, + ) + + def load_module(self, name): + co, pyc = self.modules.pop(name) + if name in sys.modules: + # If there is an existing module object named 'fullname' in + # sys.modules, the loader must use that existing module. (Otherwise, + # the reload() builtin will not work correctly.) + mod = sys.modules[name] + else: + # I wish I could just call imp.load_compiled here, but __file__ has to + # be set properly. In Python 3.2+, this all would be handled correctly + # by load_compiled. + mod = sys.modules[name] = imp.new_module(name) + try: + mod.__file__ = co.co_filename + # Normally, this attribute is 3.2+. 
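mark_rewrite() above is the machinery behind pytest's public register_assert_rewrite() helper: modules registered this way are rewritten on import even though they do not match the test-file patterns, and importing them beforehand triggers the "Module already imported so cannot be rewritten" warning. A small sketch with a hypothetical package name:

# mypkg/__init__.py -- "mypkg" and "helpers" are hypothetical names.
import pytest

# Must run before mypkg.helpers is imported anywhere else; otherwise the
# "Module already imported so cannot be rewritten" warning above is emitted.
pytest.register_assert_rewrite("mypkg.helpers")

from mypkg import helpers  # noqa: E402  imported after registration on purpose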
+ mod.__cached__ = pyc + mod.__loader__ = self + # Normally, this attribute is 3.4+ + mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) + six.exec_(co, mod.__dict__) + except: # noqa + if name in sys.modules: + del sys.modules[name] + raise + return sys.modules[name] + + def is_package(self, name): + try: + fd, fn, desc = self._imp_find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + def get_data(self, pathname): + """Optional PEP302 get_data API. + """ + with open(pathname, "rb") as f: + return f.read() + + +def _write_pyc(state, co, source_stat, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) + try: + with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: + fp.write(imp.get_magic()) + mtime = int(source_stat.mtime) + size = source_stat.size & 0xFFFFFFFF + fp.write(struct.pack(">", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", +} +# Python 3.5+ compatibility +try: + binop_map[ast.MatMult] = "@" +except AttributeError: + pass + +# Python 3.4+ compatibility +if hasattr(ast, "NameConstant"): + _NameConstant = ast.NameConstant +else: + + def _NameConstant(c): + return ast.Name(str(c), ast.Load()) + + +def set_location(node, lineno, col_offset): + """Set node location information recursively.""" + + def _fix(node, lineno, col_offset): + if "lineno" in node._attributes: + node.lineno = lineno + if "col_offset" in node._attributes: + node.col_offset = col_offset + for child in ast.iter_child_nodes(node): + _fix(child, lineno, col_offset) + + _fix(node, lineno, col_offset) + return node + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. 
Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :on_failure: The AST statements which will be executed if the + assertion test fails. This is the code which will construct + the failure message and raises the AssertionError. + + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + This state is reset on every new assert statement visited and used + by the other visitors. + + """ + + def __init__(self, module_path, config): + super(AssertionRewriter, self).__init__() + self.module_path = module_path + self.config = config + + def run(self, mod): + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + # Insert some special imports at the top of the module but after any + # docstrings and __future__ imports. + aliases = [ + ast.alias(py.builtin.builtins.__name__, "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + lineno = 1 + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Str) + ): + doc = item.value.s + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + not isinstance(item, ast.ImportFrom) + or item.level > 0 + or item.module != "__future__" + ): + lineno = item.lineno + break + pos += 1 + else: + lineno = item.lineno + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + # Collect asserts. + nodes = [mod] + while nodes: + node = nodes.pop() + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. 
+ and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring): + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self): + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr): + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr): + """Call py.io.saferepr on the expression.""" + return self.helper("saferepr", expr) + + def helper(self, name, *args): + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, "_" + name, ast.Load()) + return ast_Call(attr, list(args), []) + + def builtin(self, name): + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr): + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self): + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + + """ + self.explanation_specifiers = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr): + """Format the %-formatted string with current format context. + + The expl_expr should be an ast.Str instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .on_failure and + return the ast.Name instance of the formatted string. + + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [ast.Str(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node): + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_): + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. 
+ + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestWarning + import warnings + + warnings.warn_explicit( + PytestWarning("assertion is always true, perhaps remove parentheses?"), + category=None, + filename=str(self.module_path), + lineno=assert_.lineno, + ) + + self.statements = [] + self.variables = [] + self.variable_counter = itertools.count() + self.stack = [] + self.on_failure = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + # Create failure message. + body = self.on_failure + negation = ast.UnaryOp(ast.Not(), top_condition) + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Str("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast_Call(err_name, [fmt], []) + if sys.version_info[0] >= 3: + raise_ = ast.Raise(exc, None) + else: + raise_ = ast.Raise(exc, None, None) + body.append(raise_) + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, _NameConstant(None)) + self.statements.append(clear) + # Fix line numbers. + for stmt in self.statements: + set_location(stmt, assert_.lineno, assert_.col_offset) + return self.statements + + def visit_Name(self, name): + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast_Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop): + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.on_failure + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuting if needed. 
+ for i, v in enumerate(boolop.values): + if i: + fail_inner = [] + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure = fail_inner + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Str(expl)) + call = ast_Call(app, [expl_format], []) + self.on_failure.append(ast.Expr(call)) + if i < levels: + cond = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.on_failure = fail_save + expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop): + symbol = binop_map[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call_35(self, call): + """ + visit `ast.Call` nodes on Python3.5 and after + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + def visit_Starred(self, starred): + # From Python 3.5, a Starred node can appear in a function call + res, expl = self.visit(starred.value) + return starred, "*" + expl + + def visit_Call_legacy(self, call): + """ + visit `ast.Call nodes on 3.4 and below` + """ + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + new_star = new_kwarg = None + for arg in call.args: + res, expl = self.visit(arg) + new_args.append(res) + arg_expls.append(expl) + for keyword in call.keywords: + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + arg_expls.append(keyword.arg + "=" + expl) + if call.starargs: + new_star, expl = self.visit(call.starargs) + arg_expls.append("*" + expl) + if call.kwargs: + new_kwarg, expl = self.visit(call.kwargs) + arg_expls.append("**" + expl) + expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) + return res, outer_expl + + # ast.Call signature changed on 3.5, + # conditionally change which methods is named + # 
visit_Call depending on Python version + if sys.version_info >= (3, 5): + visit_Call = visit_Call_35 + else: + visit_Call = visit_Call_legacy + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp): + self.push_format_context() + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = "({})".format(left_expl) + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = "({})".format(next_expl) + results.append(next_res) + sym = binop_map[op.__class__] + syms.append(ast.Str(sym)) + expl = "%s %s %s" % (left_expl, sym, next_expl) + expls.append(ast.Str(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df008034b4fbebccd42767bc6a823a5acaaee82f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/rewrite.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000000000000000000000000000000000000..d19c8b61eee371c0c160cb4e8018d48d740d4fd7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,102 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import six + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. 
+ """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = "...Full output truncated" + if truncated_line_count == 1: + msg += " ({} line hidden)".format(truncated_line_count) + else: + msg += " ({} lines hidden)".format(truncated_line_count) + msg += ", {}".format(USAGE_MSG) + truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18ffdb797148e1fff56eaa24901e55e9fc2939c8 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/truncate.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000000000000000000000000000000000000..0633410951bf3f1571a655eb4f583ab028acf189 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.py @@ -0,0 +1,342 @@ +"""Utilities for assertion debugging""" +from __future__ 
import absolute_import +from __future__ import division +from __future__ import print_function + +import pprint + +import py +import six + +import _pytest._code +from ..compat import Sequence + +u = six.text_type + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare = None + + +# the re-encoding is needed for python2 repr +# with non-ascii characters (see issue 877 and 1379) +def ecu(s): + try: + return u(s, "utf-8", "replace") + except TypeError: + return s + + +def format_explanation(explanation): + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ + explanation = ecu(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u("\n").join(result) + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. + """ + raw_lines = (explanation or u("")).split("\n") + lines = [raw_lines[0]] + for values in raw_lines[1:]: + if values and values[0] in ["{", "}", "~", ">"]: + lines.append(values) + else: + lines[-1] += "\\n" + values + return lines + + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. 
+ """ + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith("{"): + if stackcnt[-1]: + s = u("and ") + else: + s = u("where ") + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(u(" +") + u(" ") * (len(stack) - 1) + s + line[1:]) + elif line.startswith("}"): + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + else: + assert line[0] in ["~", ">"] + stack[-1] += 1 + indent = len(stack) if line.startswith("~") else len(stack) - 1 + result.append(u(" ") * indent + line[1:]) + assert len(stack) == 1 + return result + + +# Provide basestring in python3 +try: + basestring = basestring +except NameError: + basestring = str + + +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op + left_repr = py.io.saferepr(left, maxsize=int(width // 2)) + right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) + + summary = u("%s %s %s") % (ecu(left_repr), op, ecu(right_repr)) + + def issequence(x): + return isinstance(x, Sequence) and not isinstance(x, basestring) + + def istext(x): + return isinstance(x, basestring) + + def isdict(x): + return isinstance(x, dict) + + def isset(x): + return isinstance(x, (set, frozenset)) + + def isiterable(obj): + try: + iter(obj) + return not istext(obj) + except TypeError: + return False + + verbose = config.getoption("verbose") + explanation = None + try: + if op == "==": + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + if issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, verbose) + if explanation is not None: + explanation.extend(expl) + else: + explanation = expl + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + except Exception: + explanation = [ + u( + "(pytest_assertion plugin: representation of details failed. " + "Probably an object has a faulty __repr__.)" + ), + u(_pytest._code.ExceptionInfo()), + ] + + if not explanation: + return None + + return [summary] + explanation + + +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. + """ + from difflib import ndiff + + explanation = [] + + def escape_for_readable_diff(binary_text): + """ + Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode. + This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape + newlines and carriage returns (#429). 
+ """ + r = six.text_type(repr(binary_text)[1:-1]) + r = r.replace(r"\n", "\n") + r = r.replace(r"\r", "\r") + return r + + if isinstance(left, bytes): + left = escape_for_readable_diff(left) + if isinstance(right, bytes): + right = escape_for_readable_diff(right) + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + u("Skipping %s identical leading characters in diff, use -v to show") + % i + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + u( + "Skipping %s identical trailing " + "characters in diff, use -v to show" + ) + % i + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += [u"Strings contain only whitespace, escaping them using repr()"] + explanation += [ + line.strip("\n") + for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) + ] + return explanation + + +def _compare_eq_iterable(left, right, verbose=False): + if not verbose: + return [u("Use -v to get the full diff")] + # dynamic import to speedup pytest + import difflib + + try: + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + explanation = [u("Full diff:")] + except Exception: + # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling + # sorted() on a list would raise. See issue #718. + # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
+ left_formatting = sorted(repr(x) for x in left) + right_formatting = sorted(repr(x) for x in right) + explanation = [u("Full diff (fallback to calling repr on each item):")] + explanation.extend( + line.strip() for line in difflib.ndiff(left_formatting, right_formatting) + ) + return explanation + + +def _compare_eq_sequence(left, right, verbose=False): + explanation = [] + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + explanation += [u("At index %s diff: %r != %r") % (i, left[i], right[i])] + break + if len(left) > len(right): + explanation += [ + u("Left contains more items, first extra item: %s") + % py.io.saferepr(left[len(right)]) + ] + elif len(left) < len(right): + explanation += [ + u("Right contains more items, first extra item: %s") + % py.io.saferepr(right[len(left)]) + ] + return explanation + + +def _compare_eq_set(left, right, verbose=False): + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append(u("Extra items in the left set:")) + for item in diff_left: + explanation.append(py.io.saferepr(item)) + if diff_right: + explanation.append(u("Extra items in the right set:")) + for item in diff_right: + explanation.append(py.io.saferepr(item)) + return explanation + + +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] + elif same: + explanation += [u("Common items:")] + explanation += pprint.pformat(same).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += [u("Differing items:")] + for k in diff: + explanation += [ + py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]}) + ] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u("Left contains more items:")) + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u("Right contains more items:")) + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) + return explanation + + +def _notin_text(term, text, verbose=False): + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(correct_text, text, verbose) + newdiff = [u("%s is contained here:") % py.io.saferepr(term, maxsize=42)] + for line in diff: + if line.startswith(u("Skipping")): + continue + if line.startswith(u("- ")): + continue + if line.startswith(u("+ ")): + newdiff.append(u(" ") + line[2:]) + else: + newdiff.append(line) + return newdiff diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.pyc new file mode 100644 index 0000000000000000000000000000000000000000..787fe03bc0ce27e203ef5dd6575d9a7fb28d232a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/assertion/util.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000000000000000000000000000000000000..8e7f7a804547cec9d1d3854662dcd9d3f59a5131 --- /dev/null +++ 
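_compare_eq_iterable() above builds its "Full diff" section by pretty-printing both operands and running difflib.ndiff over the resulting lines. A stripped-down illustration of the same steps with made-up data:

import difflib
import pprint

left = [1, 2, 3]
right = [1, 7, 3]

left_lines = pprint.pformat(left).splitlines()
right_lines = pprint.pformat(right).splitlines()
for line in difflib.ndiff(left_lines, right_lines):
    print(line)
# Small inputs fit on one pformat line each, so ndiff prints
#   - [1, 2, 3]
#   + [1, 7, 3]
# plus optional "? " guide lines marking the changed characters.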
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,366 @@ +""" +merged implementation of the cache provider + +the name cache was not chosen to ensure pluggy automatically +ignores the external pytest-cache +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +from collections import OrderedDict + +import attr +import py +import six + +import pytest +from .compat import _PY2 as PY2 +from .pathlib import Path +from .pathlib import resolve_from_str +from .pathlib import rmtree + +README_CONTENT = u"""\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. +""" + + +@attr.s +class Cache(object): + _cachedir = attr.ib(repr=False) + _config = attr.ib(repr=False) + + @classmethod + def for_config(cls, config): + cachedir = cls.cache_dir_from_config(config) + if config.getoption("cacheclear") and cachedir.exists(): + rmtree(cachedir, force=True) + cachedir.mkdir() + return cls(cachedir, config) + + @staticmethod + def cache_dir_from_config(config): + return resolve_from_str(config.getini("cache_dir"), config.rootdir) + + def warn(self, fmt, **args): + from _pytest.warnings import _issue_config_warning + from _pytest.warning_types import PytestWarning + + _issue_config_warning( + PytestWarning(fmt.format(**args) if args else fmt), self._config + ) + + def makedir(self, name): + """ return a directory path object with the given name. If the + directory does not yet exist, it will be created. You can use it + to manage files likes e. g. store/retrieve database + dumps across test sessions. + + :param name: must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + name = Path(name) + if len(name.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath("d", name) + res.mkdir(exist_ok=True, parents=True) + return py.path.local(res) + + def _getvaluepath(self, key): + return self._cachedir.joinpath("v", Path(key)) + + def get(self, key, default): + """ return cached value for the given key. If no value + was yet cached or the value cannot be read, the specified + default is returned. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: must be provided in case of a cache-miss or + invalid cache values. + + """ + path = self._getvaluepath(key) + try: + with path.open("r") as f: + return json.load(f) + except (ValueError, IOError, OSError): + return default + + def set(self, key, value): + """ save value for the given key. + + :param key: must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: must be of any combination of basic + python types, including nested types + like e. g. lists of dictionaries. 
+ """ + path = self._getvaluepath(key) + try: + path.parent.mkdir(exist_ok=True, parents=True) + except (IOError, OSError): + self.warn("could not create cache path {path}", path=path) + return + try: + f = path.open("wb" if PY2 else "w") + except (IOError, OSError): + self.warn("cache could not write path {path}", path=path) + else: + with f: + json.dump(value, f, indent=2, sort_keys=True) + self._ensure_supporting_files() + + def _ensure_supporting_files(self): + """Create supporting files in the cache dir that are not really part of the cache.""" + if self._cachedir.is_dir(): + readme_path = self._cachedir / "README.md" + if not readme_path.is_file(): + readme_path.write_text(README_CONTENT) + + msg = u"# created by pytest automatically, do not change\n*" + self._cachedir.joinpath(".gitignore").write_text(msg, encoding="UTF-8") + + +class LFPlugin(object): + """ Plugin which implements the --lf (run last-failing) option """ + + def __init__(self, config): + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + self.lastfailed = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count = None + self._no_failures_behavior = self.config.getoption("last_failed_no_failures") + + def pytest_report_collectionfinish(self): + if self.active and self.config.getoption("verbose") >= 0: + if not self._previously_failed_count: + return None + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + mode = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + return "run-last-failure: %s" % mode + + def pytest_runtest_logreport(self, report): + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report): + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + if not previously_failed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + return + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed + elif self._no_failures_behavior == "none": + config.hook.pytest_deselected(items=items) + items[:] = [] + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin(object): + """ Plugin which implements the --nf (run new-first) option """ + + def __init__(self, config): + self.config = config + self.active = config.option.newfirst + 
self.cached_nodeids = config.cache.get("cache/nodeids", []) + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + new_items = OrderedDict() + other_items = OrderedDict() + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + six.itervalues(new_items) + ) + self._get_increasing_order(six.itervalues(other_items)) + self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] + + def _get_increasing_order(self, items): + return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + config.cache.set("cache/nodeids", self.cached_nodeids) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="run all tests but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="store_true", + dest="cacheshow", + help="show cache contents, don't perform collection or tests", + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="remove all cache contents at start of test run.", + ) + parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="change the behavior when no test failed in the last run or no " + "information about the last failures was found in the cache", + ) + + +def pytest_cmdline_main(config): + if config.option.cacheshow: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure(config): + config.cache = Cache.for_config(config) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@pytest.fixture +def cache(request): + """ + Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be a ``/`` separated value, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + return request.config.cache + + +def pytest_report_header(config): + if config.option.verbose: + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
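The options registered in pytest_addoption above are the command-line surface of LFPlugin, NFPlugin and the cache provider. They can equally be passed to pytest.main(); each call below is an independent test run, shown only to spell out what the flags do:

import pytest

pytest.main(["--lf"])           # rerun only the tests recorded in cache/lastfailed
pytest.main(["--ff"])           # run everything, previously failed tests first
pytest.main(["--nf"])           # tests from new files first, then the rest by file mtime
pytest.main(["--cache-show"])   # print cache contents instead of collecting/running tests
pytest.main(["--cache-clear"])  # remove cache contents at the start of the run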
if sensible + + try: + displaypath = cachedir.relative_to(config.rootdir) + except ValueError: + displaypath = cachedir + return "cachedir: {}".format(displaypath) + + +def cacheshow(config, session): + from pprint import pformat + + tw = py.io.TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / "v" + tw.sep("-", "cache values") + for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()): + key = valpath.relative_to(vdir) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / "d" + if ddir.is_dir(): + contents = sorted(ddir.rglob("*")) + tw.sep("-", "cache directories") + for p in contents: + # if p.check(dir=1): + # print("%s/" % p.relto(basedir)) + if p.is_file(): + key = p.relative_to(basedir) + tw.line("{} is a file of length {:d}".format(key, p.stat().st_size)) + return 0 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3be25dd5a55a892f6b5e6d131072874358d237dc Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/cacheprovider.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.py new file mode 100644 index 0000000000000000000000000000000000000000..bc50ccc3f08207824b5b360d3418c5d1b0ad3b66 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.py @@ -0,0 +1,783 @@ +""" +per-test stdout/stderr capturing mechanism. 
+ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import io +import os +import sys +from io import UnsupportedOperation +from tempfile import TemporaryFile + +import six + +import pytest +from _pytest.compat import CaptureIO + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd" if hasattr(os, "dup") else "sys", + metavar="method", + choices=["fd", "sys", "no"], + help="per-test capturing method: one of fd|sys|no.", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="shortcut for --capture=no.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config, parser, args): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + early_config.add_cleanup(capman.stop_global_capturing) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + + early_config.add_cleanup(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +class CaptureManager(object): + """ + Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each + test phase (setup, call, teardown). After each of those points, the captured output is obtained and + attached to the collection/runtest report. + + There are two levels of capture: + * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled + during collection and each test phase. + * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this + case special handling is needed to ensure the fixtures take precedence over the global capture. 
+ """ + + def __init__(self, method): + self._method = method + self._global_capturing = None + self._current_item = None + + def _getcapture(self, method): + if method == "fd": + return MultiCapture(out=True, err=True, Capture=FDCapture) + elif method == "sys": + return MultiCapture(out=True, err=True, Capture=SysCapture) + elif method == "no": + return MultiCapture(out=False, err=False, in_=False) + else: + raise ValueError("unknown capturing method: %r" % method) + + # Global capturing control + + def start_global_capturing(self): + assert self._global_capturing is None + self._global_capturing = self._getcapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self): + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self): + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_=False): + cap = getattr(self, "_global_capturing", None) + if cap is not None: + cap.suspend_capturing(in_=in_) + + def read_global_capture(self): + return self._global_capturing.readouterr() + + # Fixture Control (its just forwarding, think about removing this later) + + def activate_fixture(self, item): + """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over + the global capture. + """ + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._start() + + def deactivate_fixture(self, item): + """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture.close() + + def suspend_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._suspend() + + def resume_fixture(self, item): + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self): + """Context manager to temporarily disables global and current fixture capturing.""" + # Need to undo local capsys-et-al if exists before disabling global capture + self.suspend_fixture(self._current_item) + self.suspend_global_capture(in_=False) + try: + yield + finally: + self.resume_global_capture() + self.resume_fixture(self._current_item) + + @contextlib.contextmanager + def item_capture(self, when, item): + self.resume_global_capture() + self.activate_fixture(item) + try: + yield + finally: + self.deactivate_fixture(item) + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @pytest.hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector): + if isinstance(collector, pytest.File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_protocol(self, item): + self._current_item = item + yield + self._current_item = None + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self.item_capture("setup", item): + yield + + 
@pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self.item_capture("call", item): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self.item_capture("teardown", item): + yield + + @pytest.hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self, excinfo): + self.stop_global_capturing() + + @pytest.hookimpl(tryfirst=True) + def pytest_internalerror(self, excinfo): + self.stop_global_capturing() + + +capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} + + +def _ensure_only_one_capture_fixture(request, name): + fixtures = set(request.fixturenames) & capture_fixtures - {name} + if fixtures: + fixtures = sorted(fixtures) + fixtures = fixtures[0] if len(fixtures) == 1 else fixtures + raise request.raiseerror( + "cannot use {} and {} at the same time".format(fixtures, name) + ) + + +@pytest.fixture +def capsys(request): + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` + objects. + """ + _ensure_only_one_capture_fixture(request, "capsys") + with _install_capture_fixture_on_item(request, SysCapture) as fixture: + yield fixture + + +@pytest.fixture +def capsysbinary(request): + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` + objects. + """ + _ensure_only_one_capture_fixture(request, "capsysbinary") + # Currently, the implementation uses the python3 specific `.buffer` + # property of CaptureIO. + if sys.version_info < (3,): + raise request.raiseerror("capsysbinary is only supported on python 3") + with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: + yield fixture + + +@pytest.fixture +def capfd(request): + """Enable capturing of writes to file descriptors ``1`` and ``2`` and make + captured output available via ``capfd.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` + objects. + """ + _ensure_only_one_capture_fixture(request, "capfd") + if not hasattr(os, "dup"): + pytest.skip( + "capfd fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCapture) as fixture: + yield fixture + + +@pytest.fixture +def capfdbinary(request): + """Enable capturing of write to file descriptors 1 and 2 and make + captured output available via ``capfdbinary.readouterr`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be + ``bytes`` objects. + """ + _ensure_only_one_capture_fixture(request, "capfdbinary") + if not hasattr(os, "dup"): + pytest.skip( + "capfdbinary fixture needs os.dup function which is not available in this system" + ) + with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: + yield fixture + + +@contextlib.contextmanager +def _install_capture_fixture_on_item(request, capture_class): + """ + Context manager which creates a ``CaptureFixture`` instance and "installs" it on + the item/node of the given request. Used by ``capsys`` and ``capfd``. + + The CaptureFixture is added as attribute of the item because it needs to accessed + by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. 
+ """ + request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) + capmanager = request.config.pluginmanager.getplugin("capturemanager") + # need to active this fixture right away in case it is being used by another fixture (setup phase) + # if this fixture is being used only by a test function (call phase), then we wouldn't need this + # activation, but it doesn't hurt + capmanager.activate_fixture(request.node) + yield fixture + fixture.close() + del request.node._capture_fixture + + +class CaptureFixture(object): + """ + Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` + fixtures. + """ + + def __init__(self, captureclass, request): + self.captureclass = captureclass + self.request = request + self._capture = None + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + + def _start(self): + # Start if not started yet + if getattr(self, "_capture", None) is None: + self._capture = MultiCapture( + out=True, err=True, in_=False, Capture=self.captureclass + ) + self._capture.start_capturing() + + def close(self): + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self): + """Read and return the captured output so far, resetting the internal buffer. + + :return: captured content as a namedtuple with ``out`` and ``err`` string attributes + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self): + """Suspends this fixture's own capturing temporarily.""" + self._capture.suspend_capturing() + + def _resume(self): + """Resumes this fixture's own capturing temporarily.""" + self._capture.resume_capturing() + + @contextlib.contextmanager + def disabled(self): + """Temporarily disables capture while inside the 'with' block.""" + capmanager = self.request.config.pluginmanager.getplugin("capturemanager") + with capmanager.global_and_fixture_disabled(): + yield + + +def safe_text_dupfile(f, mode, default_encoding="UTF8"): + """ return an open text file object that's a duplicate of f on the + FD-level if possible. 
+ """ + encoding = getattr(f, "encoding", None) + try: + fd = f.fileno() + except Exception: + if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): + # we seem to have a text stream, let's just use it + return f + else: + newfd = os.dup(fd) + if "b" not in mode: + mode += "b" + f = os.fdopen(newfd, mode, 0) # no buffering + return EncodedFile(f, encoding or default_encoding) + + +class EncodedFile(object): + errors = "strict" # possibly needed by py3 code (issue555) + + def __init__(self, buffer, encoding): + self.buffer = buffer + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, six.text_type): + obj = obj.encode(self.encoding, "replace") + self.buffer.write(obj) + + def writelines(self, linelist): + data = "".join(linelist) + self.write(data) + + @property + def name(self): + """Ensure that file.name is a string.""" + return repr(self.buffer) + + def __getattr__(self, name): + return getattr(object.__getattribute__(self, "buffer"), name) + + +CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) + + +class MultiCapture(object): + out = err = in_ = None + + def __init__(self, out=True, err=True, in_=True, Capture=None): + if in_: + self.in_ = Capture(0) + if out: + self.out = Capture(1) + if err: + self.err = Capture(2) + + def start_capturing(self): + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self): + """ pop current snapshot out/err capture and flush to orig streams. """ + out, err = self.readouterr() + if out: + self.out.writeorg(out) + if err: + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_=False): + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self): + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if hasattr(self, "_in_suspended"): + self.in_.resume() + del self._in_suspended + + def stop_capturing(self): + """ stop capturing and reset capturing streams """ + if hasattr(self, "_reset"): + raise ValueError("was already stopped") + self._reset = True + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def readouterr(self): + """ return snapshot unicode value of stdout/stderr capturings. """ + return CaptureResult( + self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "", + ) + + +class NoCapture(object): + EMPTY_BUFFER = None + __init__ = start = done = suspend = resume = lambda *args: None + + +class FDCaptureBinary(object): + """Capture IO to/from a given os-level filedescriptor. 
+ + snap() produces `bytes` + """ + + EMPTY_BUFFER = bytes() + + def __init__(self, targetfd, tmpfile=None): + self.targetfd = targetfd + try: + self.targetfd_save = os.dup(self.targetfd) + except OSError: + self.start = lambda: None + self.done = lambda: None + else: + if targetfd == 0: + assert not tmpfile, "cannot set tmpfile with stdin" + tmpfile = open(os.devnull, "r") + self.syscapture = SysCapture(targetfd) + else: + if tmpfile is None: + f = TemporaryFile() + with f: + tmpfile = safe_text_dupfile(f, mode="wb+") + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, tmpfile) + else: + self.syscapture = NoCapture() + self.tmpfile = tmpfile + self.tmpfile_fd = tmpfile.fileno() + + def __repr__(self): + return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save) + + def start(self): + """ Start capturing on targetfd using memorized tmpfile. """ + try: + os.fstat(self.targetfd_save) + except (AttributeError, OSError): + raise ValueError("saved filedescriptor not valid anymore") + os.dup2(self.tmpfile_fd, self.targetfd) + self.syscapture.start() + + def snap(self): + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + """ stop capturing, restore streams, return original capture file, + seeked to position zero. """ + targetfd_save = self.__dict__.pop("targetfd_save") + os.dup2(targetfd_save, self.targetfd) + os.close(targetfd_save) + self.syscapture.done() + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + + def resume(self): + self.syscapture.resume() + os.dup2(self.tmpfile_fd, self.targetfd) + + def writeorg(self, data): + """ write to original file descriptor. """ + if isinstance(data, six.text_type): + data = data.encode("utf8") # XXX use encoding of original stream + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBinary): + """Capture IO to/from a given os-level filedescriptor. + + snap() produces text + """ + + EMPTY_BUFFER = str() + + def snap(self): + res = FDCaptureBinary.snap(self) + enc = getattr(self.tmpfile, "encoding", None) + if enc and isinstance(res, bytes): + res = six.text_type(res, enc, "replace") + return res + + +class SysCapture(object): + + EMPTY_BUFFER = str() + + def __init__(self, fd, tmpfile=None): + name = patchsysdict[fd] + self._old = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() + self.tmpfile = tmpfile + + def start(self): + setattr(sys, self.name, self.tmpfile) + + def snap(self): + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def done(self): + setattr(sys, self.name, self._old) + del self._old + _attempt_to_close_capture_file(self.tmpfile) + + def suspend(self): + setattr(sys, self.name, self._old) + + def resume(self): + setattr(sys, self.name, self.tmpfile) + + def writeorg(self, data): + self._old.write(data) + self._old.flush() + + +class SysCaptureBinary(SysCapture): + EMPTY_BUFFER = bytes() + + def snap(self): + res = self.tmpfile.buffer.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + +class DontReadFromInput(six.Iterator): + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen.
This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + + encoding = None + + def read(self, *args): + raise IOError("reading from stdin while output is captured") + + readline = read + readlines = read + __next__ = read + + def __iter__(self): + return self + + def fileno(self): + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass + + @property + def buffer(self): + if sys.version_info >= (3, 0): + return self + else: + raise AttributeError("redirected stdin has no attribute buffer") + + +def _colorama_workaround(): + """ + Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + + if not sys.platform.startswith("win32"): + return + try: + import colorama # noqa + except ImportError: + pass + + +def _readline_workaround(): + """ + Ensure readline is imported so that it attaches to the correct stdio + handles on Windows. + + Pdb uses readline support where available--when not running from the Python + prompt, the readline module is not imported until running the pdb REPL. If + running pytest with the --pdb option this means the readline module is not + imported until after I/O capture has been started. + + This is a problem for pyreadline, which is often used to implement readline + support on Windows, as it does not attach to the correct handles for stdout + and/or stdin if they have been redirected by the FDCapture mechanism. This + workaround ensures that readline is imported before I/O capture is setup so + that it can attach to the actual stdin/out for the console. + + See https://github.com/pytest-dev/pytest/pull/1281 + """ + + if not sys.platform.startswith("win32"): + return + try: + import readline # noqa + except ImportError: + pass + + +def _py36_windowsconsoleio_workaround(stream): + """ + Python 3.6 implemented unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. 
+ + See https://github.com/pytest-dev/py/issues/103 + """ + if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): + return + + # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) + if not hasattr(stream, "buffer"): + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +def _attempt_to_close_capture_file(f): + """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" + if six.PY2: + try: + f.close() + except IOError: + pass + else: + f.close() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5987f9210eee911d6d80734668e491ab499efb7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/capture.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..4af0a2339727cdec7555d2bb2139cadfad837d9e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.py @@ -0,0 +1,422 @@ +""" +python version compatibility code +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import functools +import inspect +import re +import sys +from contextlib import contextmanager + +import py +import six +from six import text_type + +import _pytest +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME + +try: + import enum +except ImportError: # pragma: no cover + # Only available in Python 3.4+ or as a backport + enum = None + +_PY3 = sys.version_info > (3, 0) +_PY2 = not _PY3 + + +if _PY3: + from inspect import signature, Parameter as Parameter +else: + from funcsigs import signature, Parameter as Parameter + +NoneType = type(None) +NOTSET = object() + +PY35 = sys.version_info[:2] >= (3, 5) +PY36 = sys.version_info[:2] >= (3, 6) +MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" + + +if _PY3: + from collections.abc import MutableMapping as MappingMixin + from collections.abc import Mapping, Sequence +else: + # those raise DeprecationWarnings in Python >=3.7 + from collections import MutableMapping as MappingMixin # noqa + from collections import Mapping, Sequence # noqa + + +if sys.version_info >= (3, 4): + from importlib.util import spec_from_file_location +else: + + def spec_from_file_location(*_, **__): + return None + + +def _format_args(func): + return str(signature(func)) + + +isfunction = inspect.isfunction +isclass = inspect.isclass +# used to work around a python2 exception info leak +exc_clear = getattr(sys, "exc_clear", lambda: None) +# The type of re.compile objects is not exposed in Python. 
+REGEX_TYPE = type(re.compile("")) + + +def is_generator(func): + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, + which in turns also initializes the "logging" module as side-effect (see issue #8). + """ + return getattr(func, "_is_coroutine", False) or ( + hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func) + ) + + +def getlocation(function, curdir): + function = get_real_func(function) + fn = py.path.local(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if fn.relto(curdir): + fn = fn.relto(curdir) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function): + """ return number of arguments used up by mock arguments (if any) """ + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] + if any(mock_modules): + sentinels = [m.DEFAULT for m in mock_modules if m is not None] + return len( + [p for p in patchings if not p.attribute_name and p.new in sentinels] + ) + return len(patchings) + + +def getfuncargnames(function, is_method=False, cls=None): + """Returns the names of a function's mandatory arguments. + + This should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. + + @RonnyPfannschmidt: This function should be refactored when we + revisit fixtures. The fixture mechanism should ask the node for + the fixture names, and not try to obtain directly from the + function object well after collection has occurred. + + """ + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + fail( + "Could not determine arguments of {!r}: {}".format(function, e), + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if is_method or ( + cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +@contextmanager +def dummy_context_manager(): + """Context manager that does nothing, useful in situations where you might need an actual context manager or not + depending on some condition. 
Using this allow to keep the same code""" + yield + + +def get_default_arg_names(function): + # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, + # to get the arguments which were excluded from its result because they had default values + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +if _PY3: + STRING_TYPES = bytes, str + UNICODE_TYPES = six.text_type + + if PY35: + + def _bytes_to_ascii(val): + return val.decode("ascii", "backslashreplace") + + else: + + def _bytes_to_ascii(val): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode("ascii") + else: + # empty bytes crashes codecs.escape_encode (#1087) + return "" + + def ascii_escaped(val): + """If val is pure ascii, returns it as a str(). Otherwise, escapes + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' + + note: + the obvious "v.decode('unicode-escape')" will return + valid utf-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a utf-8 string. + + """ + if isinstance(val, bytes): + return _bytes_to_ascii(val) + else: + return val.encode("unicode_escape").decode("ascii") + + +else: + STRING_TYPES = six.string_types + UNICODE_TYPES = six.text_type + + def ascii_escaped(val): + """In py2 bytes and str are the same type, so return if it's a bytes + object, return it unchanged if it is a full ascii string, + otherwise escape it into its binary form. + + If it's a unicode string, change the unicode characters into + unicode escapes. + + """ + if isinstance(val, bytes): + try: + return val.encode("ascii") + except UnicodeDecodeError: + return val.encode("string-escape") + else: + return val.encode("unicode-escape") + + +class _PytestWrapper(object): + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object + when we are creating fixtures, because we wrap the function object ourselves with a decorator + to issue warnings when the fixture function is called directly. + """ + + def __init__(self, obj): + self.obj = obj + + +def get_real_func(obj): + """ gets the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial. 
+ """ + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=py.io.saferepr(start_obj), current=py.io.saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """ + Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time + returning a bound method to ``holder`` if the original object was a bound method. + """ + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getfslineno(obj): + # xxx let decorators etc specify a sane ordering + obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + fslineno = _pytest._code.getfslineno(obj) + assert isinstance(fslineno[1], int), obj + return fslineno + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object, name, default): + """ Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException + instead of Exception (for more details check #2707) + """ + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def _is_unittest_unexpected_success_a_failure(): + """Return if the test suite should fail if an @expectedFailure unittest test PASSES. + + From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: + Changed in version 3.4: Returns False if there were any + unexpectedSuccesses from tests marked with the expectedFailure() decorator. + """ + return sys.version_info >= (3, 4) + + +if _PY3: + + def safe_str(v): + """returns v as string""" + return str(v) + + +else: + + def safe_str(v): + """returns v as string, converting to ascii if necessary""" + try: + return str(v) + except UnicodeError: + if not isinstance(v, text_type): + v = text_type(v) + errors = "replace" + return v.encode("utf-8", errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + "Collector", + "Module", + "Generator", + "Function", + "Instance", + "Session", + "Item", + "Class", + "File", + "_fillfuncargs", +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + + pytest.collect = ModuleType("pytest.collect") + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. 
+ from py.io import TextIO + + class CaptureIO(TextIO): + @property + def encoding(self): + return getattr(self, "_encoding", "UTF-8") + + +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), encoding="UTF-8", newline="", write_through=True + ) + + def getvalue(self): + return self.buffer.getvalue().decode("UTF-8") + + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b031e8342b180dd79637ba958591b22e9b962d01 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/compat.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8460562ab81a52b126ed180354bf8d5cd7632178 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1042 @@ +""" command line options, ini-file and conftest.py processing. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import copy +import inspect +import os +import shlex +import sys +import types +import warnings + +import py +import six +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager + +import _pytest._code +import _pytest.assertion +import _pytest.hookspec # the extension point definitions +from .exceptions import PrintHelp +from .exceptions import UsageError +from .findpaths import determine_setup +from .findpaths import exists +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest.compat import safe_str +from _pytest.outcomes import Skipped + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +class ConftestImportFailure(Exception): + def __init__(self, path, excinfo): + Exception.__init__(self, path, excinfo) + self.path = path + self.excinfo = excinfo + + +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + from _pytest.main import EXIT_USAGEERROR + + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo(e.excinfo) + tw = py.io.TerminalWriter(sys.stderr) + tw.line( + "ImportError while loading conftest '{e.path}'.".format(e=e), red=True + ) + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return 4 + else: + try: + return config.hook.pytest_cmdline_main(config=config) + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = py.io.TerminalWriter(sys.stderr) + for msg in e.args: + tw.line("ERROR: {}\n".format(msg), red=True) + return EXIT_USAGEERROR + + +class cmdline(object): # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path, optname): + """ Argparse type validator for filename arguments. + + :path: path of filename + :optname: name of the option + """ + if os.path.isdir(path): + raise UsageError("{} must be a filename, given: {}".format(optname, path)) + return path + + +def directory_arg(path, optname): + """Argparse type validator for directory arguments. + + :path: path of directory + :optname: name of the option + """ + if not os.path.isdir(path): + raise UsageError("{} must be a directory, given: {}".format(optname, path)) + return path + + +default_plugins = ( + "mark", + "main", + "terminal", + "runner", + "python", + "fixtures", + "debugging", + "unittest", + "capture", + "skipping", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "helpconfig", + "nose", + "assertion", + "junitxml", + "resultlog", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "warnings", + "logging", +) + + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") + + +def get_config(): + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config(pluginmanager) + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return config + + +def get_plugin_manager(): + """ + Obtain a new instance of the + :py:class:`_pytest.config.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. 
+ """ + return get_config().pluginmanager + + +def _prepareconfig(args=None, plugins=None): + warning = None + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = shlex.split(args, posix=sys.platform != "win32") + from _pytest import deprecated + + warning = deprecated.MAIN_STR_ARGS + config = get_config() + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, six.string_types): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + if warning: + from _pytest.warnings import _issue_config_warning + + _issue_config_warning(warning, config=config) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + except BaseException: + config._ensure_unconfigure() + raise + + +class PytestPluginManager(PluginManager): + """ + Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific + functionality: + + * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded; + * ``conftest.py`` loading during start-up; + """ + + def __init__(self): + super(PytestPluginManager, self).__init__("pytest") + self._conftest_plugins = set() + + # state related to local conftest plugins + self._path2confmods = {} + self._conftestpath2mod = {} + self._confcutdir = None + self._noconftest = False + self._duplicatepaths = set() + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err = sys.stderr + encoding = getattr(err, "encoding", "utf8") + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage + self._configured = False + + def addhooks(self, module_or_class): + """ + .. deprecated:: 2.8 + + Use :py:meth:`pluggy.PluginManager.add_hookspecs ` + instead. 
+ """ + warning = dict( + code="I2", + fslocation=_pytest._code.getfslineno(sys._getframe(1)), + nodeid=None, + message="use pluginmanager.add_hookspecs instead of " + "deprecated addhooks() method.", + ) + self._warn(warning) + return self.add_hookspecs(module_or_class) + + def parse_hookimpl_opts(self, plugin, name): + # pytest hooks are always prefixed with pytest_ + # so we avoid accessing possibly non-readable attributes + # (see issue #1073) + if not name.startswith("pytest_"): + return + # ignore some historic special names which can not be hooks anyway + if name == "pytest_plugins" or name.startswith("pytest_funcarg__"): + return + + method = getattr(plugin, name) + opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + + # consider only actual functions for hooks (#3775) + if not inspect.isroutine(method): + return + + # collect unmarked hooks as long as they have the `pytest_' prefix + if opts is None and name.startswith("pytest_"): + opts = {} + + if opts is not None: + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): + opts.setdefault(name, hasattr(method, name)) + return opts + + def parse_hookspec_opts(self, module_or_class, name): + opts = super(PytestPluginManager, self).parse_hookspec_opts( + module_or_class, name + ) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = { + "firstresult": hasattr(method, "firstresult"), + "historic": hasattr(method, "historic"), + } + return opts + + def register(self, plugin, name=None): + if name in ["pytest_catchlog", "pytest_capturelog"]: + self._warn( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + return + ret = super(PytestPluginManager, self).register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name): + # support deprecated naming because plugins (xdist e.g.) use it + return self.get_plugin(name) + + def hasplugin(self, name): + """Return True if the plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config): + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.", + ) + self._configured = True + + def _warn(self, message): + kwargs = ( + message + if isinstance(message, dict) + else {"code": "I1", "message": message, "fslocation": None, "nodeid": None} + ) + self.hook.pytest_logwarning.call_historic(kwargs=kwargs) + + # + # internal API for local conftest plugin handling + # + def _set_initial_conftests(self, namespace): + """ load initial conftest files given a preparsed "namespace". + As conftest files may add their own command line options + which have arguments ('--my-opt somepath') we might get some + false positives. All builtin and 3rd party plugins will have + been loaded, however, so common options will not confuse our logic + here. 
+ """ + current = py.path.local() + self._confcutdir = ( + current.join(namespace.confcutdir, abs=True) + if namespace.confcutdir + else None + ) + self._noconftest = namespace.noconftest + self._using_pyargs = namespace.pyargs + testpaths = namespace.file_or_dir + foundanchor = False + for path in testpaths: + path = str(path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = current.join(path, abs=1) + if exists(anchor): # we found some file object + self._try_load_conftest(anchor) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current) + + def _try_load_conftest(self, anchor): + self._getconftestmodules(anchor) + # let's also consider test* subdirs + if anchor.check(dir=1): + for x in anchor.listdir("test*"): + if x.check(dir=1): + self._getconftestmodules(x) + + def _getconftestmodules(self, path): + if self._noconftest: + return [] + + try: + return self._path2confmods[path] + except KeyError: + if path.isfile(): + directory = path.dirpath() + else: + directory = path + # XXX these days we may rather want to use config.rootdir + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir + clist = [] + for parent in directory.realpath().parts(): + if self._confcutdir and self._confcutdir.relto(parent): + continue + conftestpath = parent.join("conftest.py") + if conftestpath.isfile(): + mod = self._importconftest(conftestpath) + clist.append(mod) + + self._path2confmods[path] = clist + return clist + + def _rget_with_confmod(self, name, path): + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest(self, conftestpath): + try: + return self._conftestpath2mod[conftestpath] + except KeyError: + pkgpath = conftestpath.pypkgpath() + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.purebasename) + try: + mod = conftestpath.pyimport() + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + from _pytest.deprecated import ( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + ) + + warnings.warn_explicit( + PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST, + category=None, + filename=str(conftestpath), + lineno=0, + ) + except Exception: + raise ConftestImportFailure(conftestpath, sys.exc_info()) + + self._conftest_plugins.add(mod) + self._conftestpath2mod[conftestpath] = mod + dirpath = conftestpath.dirpath() + if dirpath in self._path2confmods: + for path, mods in self._path2confmods.items(): + if path and path.relto(dirpath) or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace("loaded conftestmodule %r" % (mod)) + self.consider_conftest(mod) + return mod + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse(self, args): + for opt1, opt2 in zip(args, args[1:]): + if opt1 == "-p": + self.consider_pluginarg(opt2) + + def consider_pluginarg(self, arg): + if arg.startswith("no:"): + name = arg[3:] + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + self.import_plugin(arg) + + def consider_conftest(self, conftestmodule): + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self): + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod): + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def 
_import_plugin_specs(self, spec): + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname): + # most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, (six.text_type, str)), ( + "module name as text required, got %r" % modname + ) + modname = str(modname) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + if modname in builtin_plugins: + importspec = "_pytest." + modname + else: + importspec = modname + self.rewrite_hook.mark_rewrite(importspec) + try: + __import__(importspec) + except ImportError as e: + new_exc_type = ImportError + new_exc_message = 'Error importing plugin "%s": %s' % ( + modname, + safe_str(e.args[0]), + ) + new_exc = new_exc_type(new_exc_message) + + six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) + + except Skipped as e: + self._warn("skipped plugin %r: %s" % ((modname, e.msg))) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list(specs): + """ + Parses a list of "plugin specs" and returns a list of plugin names. + + Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in + which case it is returned as a list. Specs can also be `None` in which case an + empty list is returned. + """ + if specs is not None: + if isinstance(specs, str): + specs = specs.split(",") if specs else [] + if not isinstance(specs, (list, tuple)): + raise UsageError( + "Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. Given: %r" % specs + ) + return list(specs) + return [] + + +def _ensure_removed_sysmodule(modname): + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset(object): + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files): + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + yield package_name + + +class Config(object): + """ access to configuration values, pluginmanager and plugin hooks. """ + + def __init__(self, pluginmanager): + #: access to command line option as attributes. 
+ #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead + self.option = argparse.Namespace() + from .argparsing import Parser, FILE_OR_DIR + + _a = FILE_OR_DIR + self._parser = Parser( + usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), + processopt=self._processopt, + ) + #: a pluginmanager instance + self.pluginmanager = pluginmanager + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = self.pluginmanager.hook + self._inicache = {} + self._override_ini = () + self._opt2dest = {} + self._cleanup = [] + self._warn = self.pluginmanager._warn + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + + def do_setns(dic): + import pytest + + setns(pytest, dic) + + self.hook.pytest_namespace.call_historic(do_setns, {}) + self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) + + def add_cleanup(self, func): + """ Add a function to be called when the config object gets out of + use (usually coninciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self): + assert not self._configured + self._configured = True + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self): + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def warn(self, code, message, fslocation=None, nodeid=None): + """ + .. deprecated:: 3.8 + + Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead. + + Generate a warning for this test session. + """ + from _pytest.warning_types import RemovedInPytest4Warning + + if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2: + filename, lineno = fslocation[:2] + else: + filename = "unknown file" + lineno = 0 + msg = "config.warn has been deprecated, use warnings.warn instead" + if nodeid: + msg = "{}: {}".format(nodeid, msg) + warnings.warn_explicit( + RemovedInPytest4Warning(msg), + category=None, + filename=filename, + lineno=lineno, + ) + self.hook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, fslocation=fslocation, nodeid=nodeid + ) + ) + + def get_terminal_writer(self): + return self.pluginmanager.get_plugin("terminalreporter")._tw + + def pytest_cmdline_parse(self, pluginmanager, args): + # REF1 assert self == pluginmanager.config, (self, pluginmanager.config) + self.parse(args) + return self + + def notify_exception(self, excinfo, option=None): + if option and option.fulltrace: + style = "long" + else: + style = "native" + excrepr = excinfo.getrepr( + funcargs=True, showlocals=getattr(option, "showlocals", False), style=style + ) + res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) + if not any(res): + for line in str(excrepr).split("\n"): + sys.stderr.write("INTERNALERROR> %s\n" % line) + sys.stderr.flush() + + def cwd_relative_nodeid(self, nodeid): + # nodeid's are relative to the rootpath, compute relative to cwd + if self.invocation_dir != self.rootdir: + fullpath = self.rootdir.join(nodeid) + nodeid = self.invocation_dir.bestrelpath(fullpath) + return nodeid + + @classmethod + def fromdictargs(cls, option_dict, args): + """ constructor useable for subprocesses. 
""" + config = get_config() + config.option.__dict__.update(option_dict) + config.parse(args, addopts=False) + for x in config.option.plugins: + config.pluginmanager.consider_pluginarg(x) + return config + + def _processopt(self, opt): + for name in opt._short_opts + opt._long_opts: + self._opt2dest[name] = opt.dest + + if hasattr(opt, "default") and opt.dest: + if not hasattr(self.option, opt.dest): + setattr(self.option, opt.dest, opt.default) + + @hookimpl(trylast=True) + def pytest_load_initial_conftests(self, early_config): + self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) + + def _initini(self, args): + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + r = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self.rootdir, self.inifile, self.inicfg = r + self._parser.extra_info["rootdir"] = self.rootdir + self._parser.extra_info["inifile"] = self.inifile + self.invocation_dir = py.path.local() + self._parser.addini("addopts", "extra command line options", "args") + self._parser.addini("minversion", "minimally required pytest version") + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args): + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = ns.assertmode + if mode == "rewrite": + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + _warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook): + """ + Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins. + """ + import pkg_resources + + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. + return + + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + metadata_files = "RECORD", "SOURCES.txt" + + package_files = ( + entry.split(",")[0] + for entrypoint in pkg_resources.iter_entry_points("pytest11") + for metadata in metadata_files + for entry in entrypoint.dist._get_metadata(metadata) + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _preparse(self, args, addopts=True): + if addopts: + args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args + self._initini(args) + if addopts: + args[:] = self.getini("addopts") + args + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. 
+ self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + self.known_args_namespace = ns = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + if self.known_args_namespace.confcutdir is None and self.inifile: + confcutdir = py.path.local(self.inifile).dirname + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure: + e = sys.exc_info()[1] + if ns.help or ns.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self._warn("could not load initial conftests (%s)\n" % e.path) + else: + raise + + def _checkversion(self): + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + ver = minver.split(".") + myver = pytest.__version__.split(".") + if myver < ver: + raise pytest.UsageError( + "%s:%d: requires pytest-%s, actual pytest-%s'" + % ( + self.inicfg.config.path, + self.inicfg.lineof("minversion"), + minver, + pytest.__version__, + ) + ) + + def parse(self, args, addopts=True): + # parse given cmdline arguments into this config object. + assert not hasattr( + self, "args" + ), "can only parse cmdline args at most once per Config object" + self._origargs = args + self.hook.pytest_addhooks.call_historic( + kwargs=dict(pluginmanager=self.pluginmanager) + ) + self._preparse(args, addopts=addopts) + # XXX deprecated hook: + self.hook.pytest_cmdline_preparse(config=self, args=args) + self._parser.after_preparse = True + try: + args = self._parser.parse_setoption( + args, self.option, namespace=self.option + ) + if not args: + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini("testpaths") + if not args: + args = [cwd] + self.args = args + except PrintHelp: + pass + + def addinivalue_line(self, name, line): + """ add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes the + the first line in its value. """ + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name): + """ return configuration value from an :ref:`ini file `. If the + specified name hasn't been registered through a prior + :py:func:`parser.addini <_pytest.config.Parser.addini>` + call (usually from a plugin), a ValueError is raised. 
""" + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + def _getini(self, name): + try: + description, type, default = self._parser._inidict[name] + except KeyError: + raise ValueError("unknown configuration value: %r" % (name,)) + value = self._get_override_ini_value(name) + if value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return "" + return [] + if type == "pathlist": + dp = py.path.local(self.inicfg.config.path).dirpath() + values = [] + for relpath in shlex.split(value): + values.append(dp.join(relpath, abs=True)) + return values + elif type == "args": + return shlex.split(value) + elif type == "linelist": + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + elif type == "bool": + return bool(_strtobool(value.strip())) + else: + assert type is None + return value + + def _getconftest_pathlist(self, name, path): + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + modpath = py.path.local(mod.__file__).dirpath() + values = [] + for relroot in relroots: + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", py.path.local.sep) + relroot = modpath.join(relroot, abs=True) + values.append(relroot) + return values + + def _get_override_ini_value(self, name): + value = None + # override_ini is a list of "ini=value" options + # always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name, default=notset, skip=False): + """ return command line option value. + + :arg name: name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :arg default: default value if no option of that name exists. + :arg skip: if True raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip("no %r option found" % (name,)) + raise ValueError("no option named %r" % (name,)) + + def getvalue(self, name, path=None): + """ (deprecated, use getoption()) """ + return self.getoption(name) + + def getvalueorskip(self, name, path=None): + """ (deprecated, use getoption(skip=True)) """ + return self.getoption(name, skip=True) + + +def _assertion_supported(): + try: + assert False + except AssertionError: + return True + else: + return False + + +def _warn_about_missing_assertion(mode): + if not _assertion_supported(): + if mode == "plain": + sys.stderr.write( + "WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" 
+ ) + else: + sys.stderr.write( + "WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + + +def setns(obj, dic): + import pytest + + for name, value in dic.items(): + if isinstance(value, dict): + mod = getattr(obj, name, None) + if mod is None: + modname = "pytest.%s" % name + mod = types.ModuleType(modname) + sys.modules[modname] = mod + mod.__all__ = [] + setattr(obj, name, mod) + obj.__all__.append(name) + setns(mod, value) + else: + setattr(obj, name, value) + obj.__all__.append(name) + # if obj != pytest: + # pytest.__all__.append(name) + setattr(pytest, name, value) + + +def create_terminal_writer(config, *args, **kwargs): + """Create a TerminalWriter instance configured according to the options + in the config object. Every code which requires a TerminalWriter object + and has access to a config object should use this function. + """ + tw = py.io.TerminalWriter(*args, **kwargs) + if config.option.color == "yes": + tw.hasmarkup = True + if config.option.color == "no": + tw.hasmarkup = False + return tw + + +def _strtobool(val): + """Convert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: copied from distutils.util + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif val in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError("invalid truth value %r" % (val,)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a7a33aea21e83f835b2807bda6396d254c58759 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000000000000000000000000000000000000..5012456b97518b857a906c02d58ae221d23e4506 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,409 @@ +import argparse +import sys as _sys +import warnings +from gettext import gettext as _ + +import py +import six + +from ..main import EXIT_USAGEERROR + +FILE_OR_DIR = "file_or_dir" + + +class Parser(object): + """ Parser for command line arguments and ini-file values. + + :ivar extra_info: dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + def __init__(self, usage=None, processopt=None): + self._anonymous = OptionGroup("custom options", parser=self) + self._groups = [] + self._processopt = processopt + self._usage = usage + self._inidict = {} + self._ininames = [] + self.extra_info = {} + + def processoption(self, option): + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup(self, name, description="", after=None): + """ get (or create) a named option Group. + + :name: name of the option group. + :description: long description for --help output. + :after: name of other group, used for ordering --help output. 
+ + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts, **attrs): + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. + """ + self._anonymous.addoption(*opts, **attrs) + + def parse(self, args, namespace=None): + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return self.optparser.parse_args(args, namespace=namespace) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter + return optparser + + def parse_setoption(self, args, option, namespace=None): + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments at this + point. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args(self, args, namespace=None): + """parses and returns a namespace object with known arguments, and + the remaining arguments unknown at this point. + """ + optparser = self._getparser() + args = [str(x) if isinstance(x, py.path.local) else x for x in args] + return optparser.parse_known_args(args, namespace=namespace) + + def addini(self, name, help, type=None, default=None): + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` + or ``bool``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. + """ + assert type in (None, "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. 
+ """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument(object): + """class that mimics the necessary behaviour of optparse.Option + + its currently a least effort implementation + and ignoring choices and integer prefixes + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get("dest") + if "%default" in (attrs.get("help") or ""): + warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + DeprecationWarning, + stacklevel=3, + ) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, six.string_types): + if typ == "choice": + warnings.warn( + "`type` argument to addoption() is the string %r." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + "`type` argument to addoption() is the string %r, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: %s)" % (typ, names), + DeprecationWarning, + stacklevel=4, + ) + attrs["type"] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs["type"] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError("need a long or short option", self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = "default dest help".split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self): + args = 
[] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup(object): + def __init__(self, name, description="", parser=None): + self.name = name + self.description = description + self.options = [] + self.parser = parser + + def addoption(self, *optnames, **attrs): + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + conflict = set(optnames).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *optnames, **attrs): + option = Argument(*optnames, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option, shortupper=False): + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__(self, parser, extra_info=None): + if not extra_info: + extra_info = {} + self._parser = parser + argparse.ArgumentParser.__init__( + self, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user + self.extra_info = extra_info + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. 
+ Overrides the method in parent class to change exit code""" + self.print_usage(_sys.stderr) + args = {"prog": self.prog, "message": message} + self.exit(EXIT_USAGEERROR, _("%(prog)s: error: %(message)s\n") % args) + + def parse_args(self, args=None, namespace=None): + """allow splitting of positional arguments""" + args, argv = self.parse_known_args(args, namespace) + if argv: + for arg in argv: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(argv))] + for k, v in sorted(self.extra_info.items()): + lines.append(" %s: %s" % (k, v)) + self.error("\n".join(lines)) + getattr(args, FILE_OR_DIR).extend(argv) + return args + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """shorten help for long options that differ only in extra hyphens + + - collapse **long** options that are the same except for extra hyphens + - special action attribute map_long_option allows surpressing additional + long options + - shortcut if there are only two options and one of them is a short one + - cache result on action object as this is called at least 2 times + """ + + def _format_action_invocation(self, action): + orgstr = argparse.HelpFormatter._format_action_invocation(self, action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr + return orgstr + return_list = [] + option_map = getattr(action, "map_long_option", {}) + if option_map is None: + option_map = {} + short_long = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), self + ) + xxoption = option[2:] + if xxoption.split()[0] not in option_map: + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + action._formatted_action_invocation = ", ".join(return_list) + return action._formatted_action_invocation diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3d2affb8640b9a2164b317d8d1b98d976c0554a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/argparsing.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..19fe5cb08ed4a188840d83ce7e5b1532080b6a7e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,9 @@ +class UsageError(Exception): + """ error in pytest usage or invocation""" + + +class PrintHelp(Exception): + 
"""Raised when pytest should print it's help to skip the rest of the + argument parsing and validation.""" + + pass diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ea5735fc34a89b3d73beb6243265e1497be715c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/exceptions.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000000000000000000000000000000000000..4f371ec7f6b0302cae7f4cd9b12d80dec8326da7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,148 @@ +import os + +import py + +from .exceptions import UsageError + + +def exists(path, ignore=EnvironmentError): + try: + return path.check() + except ignore: + return False + + +def getcfg(args, config=None): + """ + Search the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict). + + note: config is optional and used only to issue warnings explicitly (#2891). + """ + from _pytest.deprecated import CFG_PYTEST_SECTION + + inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [py.path.local()] + for arg in args: + arg = py.path.local(arg) + for base in arg.parts(reverse=True): + for inibasename in inibasenames: + p = base.join(inibasename) + if exists(p): + iniconfig = py.iniconfig.IniConfig(p) + if "pytest" in iniconfig.sections: + if inibasename == "setup.cfg" and config is not None: + from _pytest.warnings import _issue_config_warning + from _pytest.warning_types import RemovedInPytest4Warning + + _issue_config_warning( + RemovedInPytest4Warning( + CFG_PYTEST_SECTION.format(filename=inibasename) + ), + config=config, + ) + return base, p, iniconfig["pytest"] + if ( + inibasename == "setup.cfg" + and "tool:pytest" in iniconfig.sections + ): + return base, p, iniconfig["tool:pytest"] + elif inibasename == "pytest.ini": + # allowed to be empty + return base, p, {} + return None, None, None + + +def get_common_ancestor(paths): + common_ancestor = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if path.relto(common_ancestor) or path == common_ancestor: + continue + elif common_ancestor.relto(path): + common_ancestor = path + else: + shared = path.common(common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = py.path.local() + elif common_ancestor.isfile(): + common_ancestor = common_ancestor.dirpath() + return common_ancestor + + +def get_dirs_from_args(args): + def is_option(x): + return str(x).startswith("-") + + def get_file_part_from_node_id(x): + return str(x).split("::")[0] + + def get_dir_from_path(path): + if path.isdir(): + return path + return py.path.local(path.dirname) + + # These look like paths but may not exist + possible_paths = ( + py.path.local(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if path.exists()] + + +def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): + dirs = 
get_dirs_from_args(args) + if inifile: + iniconfig = py.iniconfig.IniConfig(inifile) + is_cfg_file = str(inifile).endswith(".cfg") + sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] + for section in sections: + try: + inicfg = iniconfig[section] + if is_cfg_file and section == "pytest" and config is not None: + from _pytest.deprecated import CFG_PYTEST_SECTION + from _pytest.warnings import _issue_config_warning + + # TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once + # the deprecation expires. + _issue_config_warning( + CFG_PYTEST_SECTION.format(filename=str(inifile)), config + ) + break + except KeyError: + inicfg = None + rootdir = get_common_ancestor(dirs) + else: + ancestor = get_common_ancestor(dirs) + rootdir, inifile, inicfg = getcfg([ancestor], config=config) + if rootdir is None: + for rootdir in ancestor.parts(reverse=True): + if rootdir.join("setup.py").exists(): + break + else: + rootdir, inifile, inicfg = getcfg(dirs, config=config) + if rootdir is None: + rootdir = get_common_ancestor([py.path.local(), ancestor]) + is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" + if is_fs_root: + rootdir = ancestor + if rootdir_cmd_arg: + rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg)) + if not os.path.isdir(str(rootdir_abs_path)): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir_abs_path + ) + ) + rootdir = rootdir_abs_path + return rootdir, inifile, inicfg or {} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ae0d8bbad2705e49742ef6f709dc078e55ee6d3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/config/findpaths.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9729d5baaec5c82f03dce70a36de8449fb8abd --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.py @@ -0,0 +1,185 @@ +""" interactive debugging with PDB, the Python Debugger. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pdb +import sys +from doctest import UnexpectedException + +from _pytest import outcomes +from _pytest.config import hookimpl + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="start the interactive Python debugger on errors or KeyboardInterrupt.", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + help="start a custom interactive Python debugger on errors. 
" + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test.", + ) + + +def pytest_configure(config): + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + old = (pdb.set_trace, pytestPDB._pluginmanager) + + def fin(): + pdb.set_trace, pytestPDB._pluginmanager = old + pytestPDB._config = None + pytestPDB._pdb_cls = pdb.Pdb + + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls + config._cleanup.append(fin) + + +class pytestPDB(object): + """ Pseudo PDB that defers to the real pdb. """ + + _pluginmanager = None + _config = None + _pdb_cls = pdb.Pdb + + @classmethod + def set_trace(cls, set_break=True): + """ invoke PDB set_trace debugging, dropping any IO capturing. """ + import _pytest.config + + frame = sys._getframe().f_back + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + tw.sep(">", "PDB set_trace (IO-capturing turned off)") + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config) + if set_break: + cls._pdb_cls().set_trace(frame) + + +class PdbInvoke(object): + def pytest_exception_interact(self, node, call, report): + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excrepr, excinfo): + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace(object): + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem): + _test_pytest_function(pyfuncitem) + yield + + +def _test_pytest_function(pyfuncitem): + pytestPDB.set_trace(set_break=False) + testfunction = pyfuncitem.obj + pyfuncitem.obj = pdb.runcall + if pyfuncitem._isyieldedfunction(): + arg_list = list(pyfuncitem._args) + arg_list.insert(0, testfunction) + pyfuncitem._args = tuple(arg_list) + else: + if "func" in pyfuncitem._fixtureinfo.argnames: + raise ValueError("--trace can't be used with a fixture named func!") + pyfuncitem.funcargs["func"] = testfunction + new_list = list(pyfuncitem._fixtureinfo.argnames) + new_list.append("func") + pyfuncitem._fixtureinfo.argnames = tuple(new_list) + + +def _enter_pdb(node, excinfo, rep): + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True + if post_mortem(tb): + outcomes.exit("Quitting debugger") + return rep + + +def _postmortem_traceback(excinfo): + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + else: + return excinfo._excinfo[2] + + +def _find_last_non_hidden_frame(stack): + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return i + + +def post_mortem(t): + class Pdb(pytestPDB._pdb_cls): + def get_stack(self, f, t): + stack, i = pdb.Pdb.get_stack(self, f, t) + if f is None: + i = _find_last_non_hidden_frame(stack) + return stack, i + + p = Pdb() + p.reset() + p.interaction(None, t) + return p.quitting diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c943a5bb1dc83183b6d186e34034b84efed3eaa Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/debugging.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1b2a6ec71edc45643442dce5895bfc615df1da --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.py @@ -0,0 +1,118 @@ +""" +This module contains deprecation messages and bits of code used elsewhere in the codebase +that is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either PytestWarning instances or UnformattedWarning +in case of warnings which need to format their messages. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from _pytest.warning_types import RemovedInPytest4Warning +from _pytest.warning_types import UnformattedWarning + + +MAIN_STR_ARGS = RemovedInPytest4Warning( + "passing a string to pytest.main() is deprecated, " + "pass a list of arguments instead." +) + +YIELD_TESTS = RemovedInPytest4Warning( + "yield tests are deprecated, and scheduled to be removed in pytest 4.0" +) + +CACHED_SETUP = RemovedInPytest4Warning( + "cached_setup is deprecated and will be removed in a future release. " + "Use standard fixture functions instead." +) + +COMPAT_PROPERTY = UnformattedWarning( + RemovedInPytest4Warning, + "usage of {owner}.{name} is deprecated, please use pytest.{name} instead", +) + +CUSTOM_CLASS = UnformattedWarning( + RemovedInPytest4Warning, + 'use of special named "{name}" objects in collectors of type "{type_name}" to ' + "customize the created nodes is deprecated. 
" + "Use pytest_pycollect_makeitem(...) to create custom " + "collection nodes instead.", +) + +FUNCARG_PREFIX = UnformattedWarning( + RemovedInPytest4Warning, + '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' + "and scheduled to be removed in pytest 4.0. " + "Please remove the prefix and use the @pytest.fixture decorator instead.", +) + +FIXTURE_FUNCTION_CALL = UnformattedWarning( + RemovedInPytest4Warning, + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly, ' + "are created automatically when test functions request them as parameters. " + "See https://docs.pytest.org/en/latest/fixture.html for more information.", +) + +CFG_PYTEST_SECTION = UnformattedWarning( + RemovedInPytest4Warning, + "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.", +) + +GETFUNCARGVALUE = RemovedInPytest4Warning( + "getfuncargvalue is deprecated, use getfixturevalue" +) + +RESULT_LOG = RemovedInPytest4Warning( + "--result-log is deprecated and scheduled for removal in pytest 4.0.\n" + "See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information." +) + +MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( + "MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n" + "Please use node.get_closest_marker(name) or node.iter_markers(name).\n" + "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code" +) + +MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( + "Applying marks directly to parameters is deprecated," + " please use pytest.param(..., marks=...) instead.\n" + "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" +) + +NODE_WARN = RemovedInPytest4Warning( + "Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead." +) + +RECORD_XML_PROPERTY = RemovedInPytest4Warning( + 'Fixture renamed from "record_xml_property" to "record_property" as user ' + "properties are now available to all reporters.\n" + '"record_xml_property" is now deprecated.' +) + +COLLECTOR_MAKEITEM = RemovedInPytest4Warning( + "pycollector makeitem was removed as it is an accidentially leaked internal api" +) + +METAFUNC_ADD_CALL = RemovedInPytest4Warning( + "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n" + "Please use Metafunc.parametrize instead." +) + +PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( + "Defining pytest_plugins in a non-top-level conftest is deprecated, " + "because it affects the entire directory tree in a non-explicit way.\n" + "Please move it to the top level conftest file instead." 
+) + +PYTEST_NAMESPACE = RemovedInPytest4Warning( + "pytest_namespace is deprecated and will be removed soon" +) + +PYTEST_ENSURETEMP = RemovedInPytest4Warning( + "pytest/tmpdir_factory.ensuretemp is deprecated, \n" + "please use the tmp_path fixture or tmp_path_factory.mktemp" +) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae408012218f023495cad1ff7cb25a722992db3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/deprecated.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.py new file mode 100644 index 0000000000000000000000000000000000000000..dbf7df823652f2a02e0d3a24ad6820c1802b8431 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.py @@ -0,0 +1,516 @@ +""" discover and run doctests in modules and test files.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import platform +import sys +import traceback + +import pytest +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest.fixtures import FixtureRequest + + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None + + +def pytest_addoption(parser): + parser.addini( + "doctest_optionflags", + "option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.option.doctestmodules and not _is_setup_py(config, path, parent): + return DoctestModule(path, parent) + elif _is_doctest(config, path, parent): + return DoctestTextfile(path, parent) + + +def _is_setup_py(config, path, parent): + if path.basename != "setup.py": + return False + 
contents = path.read() + return "setuptools" in contents or "distutils" in contents + + +def _is_doctest(config, path, parent): + if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + for glob in globs: + if path.check(fnmatch=glob): + return True + return False + + +class ReprFailDoctest(TerminalRepr): + def __init__(self, reprlocation_lines): + # List of (reprlocation, lines) tuples + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw): + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures): + super(MultipleDoctestFailures, self).__init__() + self.failures = failures + + +def _init_runner_class(): + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """ + Runner to collect failures. Note that the out variable in this case is + a list instead of a stdout-like object + """ + + def __init__( + self, checker=None, verbose=None, optionflags=0, continue_on_failure=True + ): + doctest.DebugRunner.__init__( + self, checker=checker, verbose=verbose, optionflags=optionflags + ) + self.continue_on_failure = continue_on_failure + + def report_failure(self, out, test, example, got): + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception(self, out, test, example, exc_info): + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + return RUNNER_CLASS( + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(pytest.Item): + def __init__(self, name, parent, runner=None, dtest=None): + super(DoctestItem, self).__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request = None + + def setup(self): + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self): + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures = [] + self.runner.run(self.dtest, out=failures) + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self): + """ + Disable output capturing. 
Otherwise, stdout is lost to doctest (#985) + """ + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + def repr_failure(self, excinfo): + import doctest + + failures = None + if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): + failures = [excinfo.value] + elif excinfo.errisinstance(MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is not None: + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = _get_checker() + report_choice = _get_report_choice( + self.config.getoption("doctestreport") + ) + if lineno is not None: + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + lines = [ + "%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append("??? %s %s" % (indent, line)) + indent = "..." + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += traceback.format_exception(*failure.exc_info) + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + else: + return super(DoctestItem, self).repr_failure(excinfo) + + def reportinfo(self): + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup(): + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(pytest.Module): + obj = None + + def collect(self): + import doctest + + # inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) + filename = str(self.fspath) + name = self.fspath.basename + globs = 
{"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + _fix_spoof_python2(runner, encoding) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem(test.name, self, runner, test) + + +def _check_all_skipped(test): + """raises pytest.skip() if all examples in the given DocTest have the SKIP + option set. + """ + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + pytest.skip("all tests skipped by +SKIP option") + + +class DoctestModule(pytest.Module): + def collect(self): + import doctest + + if self.fspath.basename == "conftest.py": + module = self.config.pluginmanager._importconftest(self.fspath) + else: + try: + module = self.fspath.pyimport() + except ImportError: + if self.config.getvalue("doctest_ignore_import_errors"): + pytest.skip("unable to import module %r" % self.fspath) + else: + raise + # uses internal doctest module parsing mechanism + finder = doctest.DocTestFinder() + optionflags = get_optionflags(self) + runner = _get_runner( + verbose=0, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + for test in finder.find(module, module.__name__): + if test.examples: # skip empty doctests + yield DoctestItem(test.name, self, runner, test) + + +def _setup_fixtures(doctest_item): + """ + Used by DoctestTextfile and DoctestItem to setup fixture information. + """ + + def func(): + pass + + doctest_item.funcargs = {} + fm = doctest_item.session._fixturemanager + doctest_item._fixtureinfo = fm.getfixtureinfo( + node=doctest_item, func=func, cls=None, funcargs=False + ) + fixture_request = FixtureRequest(doctest_item) + fixture_request._fillfixtures() + return fixture_request + + +def _get_checker(): + """ + Returns a doctest.OutputChecker subclass that takes in account the + ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES + to strip b'' prefixes. + Useful when the same doctest should run in Python 2 and Python 3. + + An inner class is used to avoid importing "doctest" at the module + level. + """ + if hasattr(_get_checker, "LiteralsOutputChecker"): + return _get_checker.LiteralsOutputChecker() + + import doctest + import re + + class LiteralsOutputChecker(doctest.OutputChecker): + """ + Copied from doctest_nose_plugin.py from the nltk project: + https://github.com/nltk/nltk + + Further extended to also support byte literals. 
+ """ + + _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) + + def check_output(self, want, got, optionflags): + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + if res: + return True + + allow_unicode = optionflags & _get_allow_unicode_flag() + allow_bytes = optionflags & _get_allow_bytes_flag() + if not allow_unicode and not allow_bytes: + return False + + else: # pragma: no cover + + def remove_prefixes(regex, txt): + return re.sub(regex, r"\1\2", txt) + + if allow_unicode: + want = remove_prefixes(self._unicode_literal_re, want) + got = remove_prefixes(self._unicode_literal_re, got) + if allow_bytes: + want = remove_prefixes(self._bytes_literal_re, want) + got = remove_prefixes(self._bytes_literal_re, got) + res = doctest.OutputChecker.check_output(self, want, got, optionflags) + return res + + _get_checker.LiteralsOutputChecker = LiteralsOutputChecker + return _get_checker.LiteralsOutputChecker() + + +def _get_allow_unicode_flag(): + """ + Registers and returns the ALLOW_UNICODE flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_UNICODE") + + +def _get_allow_bytes_flag(): + """ + Registers and returns the ALLOW_BYTES flag. + """ + import doctest + + return doctest.register_optionflag("ALLOW_BYTES") + + +def _get_report_choice(key): + """ + This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid + importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. + """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +def _fix_spoof_python2(runner, encoding): + """ + Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This + should patch only doctests for text files because they don't have a way to declare their + encoding. Doctests in docstrings from Python modules don't have the same problem given that + Python already decoded the strings. + + This fixes the problem related in issue #2434. + """ + from _pytest.compat import _PY2 + + if not _PY2: + return + + from doctest import _SpoofOut + + class UnicodeSpoof(_SpoofOut): + def getvalue(self): + result = _SpoofOut.getvalue(self) + if encoding and isinstance(result, bytes): + result = result.decode(encoding) + return result + + runner._fakeout = UnicodeSpoof() + + +@pytest.fixture(scope="session") +def doctest_namespace(): + """ + Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. 
+ """ + return dict() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f00288a837e8a4f0e6230a17f6dd2d6f1b29d51a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/doctest.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..49c3402dcdde17fc5945f0b00b3514c47f435056 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.py @@ -0,0 +1,1386 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import inspect +import sys +import warnings +from collections import defaultdict +from collections import deque +from collections import OrderedDict + +import attr +import py +import six +from more_itertools import flatten +from py._code.code import FormattedExcinfo + +import _pytest +from _pytest import nodes +from _pytest._code.code import TerminalRepr +from _pytest.compat import _format_args +from _pytest.compat import _PytestWrapper +from _pytest.compat import exc_clear +from _pytest.compat import FuncargnamesCompatAttr +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfslineno +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import isclass +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.deprecated import FIXTURE_FUNCTION_CALL +from _pytest.outcomes import fail +from _pytest.outcomes import TEST_OUTCOME + +FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}' + + +@attr.s(frozen=True) +class PseudoFixtureDef(object): + cached_result = attr.ib() + scope = attr.ib() + + +def pytest_sessionstart(session): + import _pytest.python + import _pytest.nodes + + scopename2class.update( + { + "package": _pytest.python.Package, + "class": _pytest.python.Class, + "module": _pytest.python.Module, + "function": _pytest.nodes.Item, + "session": _pytest.main.Session, + } + ) + session._fixturemanager = FixtureManager(session) + + +scopename2class = {} + + +scope2props = dict(session=()) +scope2props["package"] = ("fspath",) +scope2props["module"] = ("fspath", "module") +scope2props["class"] = scope2props["module"] + ("cls",) +scope2props["instance"] = scope2props["class"] + ("instance",) +scope2props["function"] = scope2props["instance"] + ("function", "keywords") + + +def scopeproperty(name=None, doc=None): + def decoratescope(func): + scopename = name or func.__name__ + + def provide(self): + if func.__name__ in scope2props[self.scope]: + return func(self) + raise AttributeError( + "%s not available in %s-scoped context" % (scopename, self.scope) + ) + + return property(provide, None, None, func.__doc__) + + return decoratescope + + +def get_scope_package(node, fixturedef): + import pytest + + cls = pytest.Package + current = node + fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py") + while current and ( + type(current) is not cls or fixture_package_name != current.nodeid + ): + current 
= current.parent + if current is None: + return node.session + return current + + +def get_scope_node(node, scope): + cls = scopename2class.get(scope) + if cls is None: + raise ValueError("unknown scope") + return node.getparent(cls) + + +def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): + # this function will transform all collected calls to a functions + # if they use direct funcargs (i.e. direct parametrization) + # because we want later test execution to be able to rely on + # an existing FixtureDef structure for all arguments. + # XXX we can probably avoid this algorithm if we modify CallSpec2 + # to directly care for creating the fixturedefs within its methods. + if not metafunc._calls[0].funcargs: + return # this function call does not have direct parametrization + # collect funcargs of all callspecs into a list of values + arg2params = {} + arg2scope = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scopenum = callspec._arg2scopenum.get(argname, scopenum_function) + arg2scope[argname] = scopes[scopenum] + callspec.funcargs.clear() + + # register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # if we have a scope that is higher than function we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope != "function": + node = get_scope_node(collector, scope) + if node is None: + assert scope == "class" and isinstance(collector, _pytest.python.Module) + # use module-level collector for class-scope (for now) + node = collector + if node and argname in node._name2pseudofixturedef: + arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager, + "", + argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, + False, + False, + ) + arg2fixturedefs[argname] = [fixturedef] + if node is not None: + node._name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj): + """ return fixturemarker or None if it doesn't exist or raised + exceptions.""" + try: + return getattr(obj, "_pytestfixturefunction", None) + except TEST_OUTCOME: + # some objects raise errors like request (from flask import request) + # we don't expect them to be fixture functions + return None + + +def get_parametrized_fixture_keys(item, scopenum): + """ return list of keys for all parametrized arguments which match + the specified scope. """ + assert scopenum < scopenum_function # function + try: + cs = item.callspec + except AttributeError: + pass + else: + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
+ for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scopenum[argname] != scopenum: + continue + if scopenum == 0: # session + key = (argname, param_index) + elif scopenum == 1: # package + key = (argname, param_index, item.fspath.dirpath()) + elif scopenum == 2: # module + key = (argname, param_index, item.fspath) + elif scopenum == 3: # class + key = (argname, param_index, item.fspath, item.cls) + yield key + + +# algorithm for sorting on a per-parametrized resource setup basis +# it is called for scopenum==0 (session) first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns + + +def reorder_items(items): + argkeys_cache = {} + items_by_argkey = {} + for scopenum in range(0, scopenum_function): + argkeys_cache[scopenum] = d = {} + items_by_argkey[scopenum] = item_d = defaultdict(deque) + for item in items: + keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items = OrderedDict.fromkeys(items) + return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0)) + + +def fix_cache_order(item, argkeys_cache, items_by_argkey): + for scopenum in range(0, scopenum_function): + for key in argkeys_cache[scopenum].get(item, []): + items_by_argkey[scopenum][key].appendleft(item) + + +def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): + if scopenum >= scopenum_function or len(items) < 3: + return items + ignore = set() + items_deque = deque(items) + items_done = OrderedDict() + scoped_items_by_argkey = items_by_argkey[scopenum] + scoped_argkeys_cache = argkeys_cache[scopenum] + while items_deque: + no_argkey_group = OrderedDict() + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = OrderedDict.fromkeys( + k for k in scoped_argkeys_cache.get(item, []) if k not in ignore + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # we don't have to remove relevant items from later in the deque because they'll just be ignored + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1 + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def fillfixtures(function): + """ fill missing funcargs for a test function. """ + try: + request = function._request + except AttributeError: + # XXX this special code path is only expected to execute + # with the oejskit plugin. It uses classes with funcargs + # and we thus have to work a bit to allow this. 
+ fm = function.session._fixturemanager + fi = fm.getfixtureinfo(function.parent, function.obj, None) + function._fixtureinfo = fi + request = function._request = FixtureRequest(function) + request._fillfixtures() + # prune out funcargs for jstests + newfuncargs = {} + for name in fi.argnames: + newfuncargs[name] = function.funcargs[name] + function.funcargs = newfuncargs + else: + request._fillfixtures() + + +def get_direct_param_fixture_func(request): + return request.param + + +@attr.s(slots=True) +class FuncFixtureInfo(object): + # original function argument names + argnames = attr.ib(type=tuple) + # argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames = attr.ib(type=tuple) + names_closure = attr.ib() # type: List[str] + name2fixturedefs = attr.ib() # type: List[str, List[FixtureDef]] + + def prune_dependency_tree(self): + """Recompute names_closure from initialnames and name2fixturedefs + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. + + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest(FuncargnamesCompatAttr): + """ A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. 
+ """ + + def __init__(self, pyfuncitem): + self._pyfuncitem = pyfuncitem + #: fixture for which this request is being performed + self.fixturename = None + #: Scope string, one of "function", "class", "module", "session" + self.scope = "function" + self._fixture_defs = {} # argname -> FixtureDef + fixtureinfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index = {} + self._fixturemanager = pyfuncitem.session._fixturemanager + + @property + def fixturenames(self): + """names of all active fixtures in this request""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """ underlying collection node (depends on current request scope)""" + return self._getscopeitem(self.scope) + + def _getnextfixturedef(self, argname): + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # we arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + self._arg2fixturedefs[argname] = fixturedefs + # fixturedefs list is immutable so we maintain a decreasing index + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self): + """ the pytest config object associated with this request. """ + return self._pyfuncitem.config + + @scopeproperty() + def function(self): + """ test function object if the request has a per-function scope. """ + return self._pyfuncitem.obj + + @scopeproperty("class") + def cls(self): + """ class (can be None) where the test function was collected. """ + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """ instance (can be None) on which test function was collected. """ + # unittest support hack, see _pytest.unittest.TestCaseFunction + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @scopeproperty() + def module(self): + """ python module object where the test function was collected. """ + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @scopeproperty() + def fspath(self): + """ the file system path of the test module which collected this test. """ + return self._pyfuncitem.fspath + + @property + def keywords(self): + """ keywords/markers dictionary for the underlying node. """ + return self.node.keywords + + @property + def session(self): + """ pytest session object. """ + return self._pyfuncitem.session + + def addfinalizer(self, finalizer): + """ add finalizer/teardown function to be called after the + last test within the requesting test context finished + execution. """ + # XXX usually this method is shadowed by fixturedef specific ones + self._addfinalizer(finalizer, scope=self.scope) + + def _addfinalizer(self, finalizer, scope): + colitem = self._getscopeitem(scope) + self._pyfuncitem.session._setupstate.addfinalizer( + finalizer=finalizer, colitem=colitem + ) + + def applymarker(self, marker): + """ Apply a marker to a single test function invocation. 
+ This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object + created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg): + """ raise a FixtureLookupError with the given message. """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self): + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): + """ (deprecated) Return a testing resource managed by ``setup`` & + ``teardown`` calls. ``scope`` and ``extrakey`` determine when the + ``teardown`` function will be called so that subsequent calls to + ``setup`` would recreate the resource. With pytest-2.3 you often + do not need ``cached_setup()`` as you can directly declare a scope + on a fixture function and register a finalizer through + ``request.addfinalizer()``. + + :arg teardown: function receiving a previously setup resource. + :arg setup: a no-argument function creating a resource. + :arg scope: a string value out of ``function``, ``class``, ``module`` + or ``session`` indicating the caching lifecycle of the resource. + :arg extrakey: added to internal caching key of (funcargname, scope). + """ + from _pytest.deprecated import CACHED_SETUP + + warnings.warn(CACHED_SETUP, stacklevel=2) + if not hasattr(self.config, "_setupcache"): + self.config._setupcache = {} # XXX weakref? + cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) + cache = self.config._setupcache + try: + val = cache[cachekey] + except KeyError: + self._check_scope(self.fixturename, self.scope, scope) + val = setup() + cache[cachekey] = val + if teardown is not None: + + def finalizer(): + del cache[cachekey] + teardown(val) + + self._addfinalizer(finalizer, scope=scope) + return val + + def getfixturevalue(self, argname): + """ Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + """ + return self._get_active_fixturedef(argname).cached_result[0] + + def getfuncargvalue(self, argname): + """ Deprecated, use getfixturevalue. 
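A short sketch of the dynamic lookup that ``getfixturevalue`` enables, for cases where the fixture to use is only known at setup time (fixture names invented):

    import pytest

    @pytest.fixture
    def backend_name():
        return "memory"

    @pytest.fixture
    def storage(request):
        # resolve another fixture by name at setup time instead of
        # declaring it as a function argument
        name = request.getfixturevalue("backend_name")
        return {"backend": name}

    def test_storage(storage):
        assert storage["backend"] == "memory"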
""" + from _pytest import deprecated + + warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) + return self.getfixturevalue(argname) + + def _get_active_fixturedef(self, argname): + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef(cached_result, scope) + raise + # remove indent to prevent the python3 exception + # from leaking into the call + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self): + current = self + values = [] + while 1: + fixturedef = getattr(current, "_fixturedef", None) + if fixturedef is None: + values.reverse() + return values + values.append(fixturedef) + current = current._parent_request + + def _compute_fixture_value(self, fixturedef): + """ + Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will + force the FixtureDef object to throw away any previous results and compute a new fixture value, which + will be stored into the FixtureDef object itself. + + :param FixtureDef fixturedef: + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef.scope + try: + param = funcitem.callspec.getparam(argname) + except (AttributeError, ValueError): + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = frameinfo.filename + source_lineno = frameinfo.lineno + source_path = py.path.local(source_path) + if source_path.relto(funcitem.config.rootdir): + source_path = source_path.relto(funcitem.config.rootdir) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootdir), + source_path, + source_lineno, + ) + ) + fail(msg, pytrace=False) + else: + # indices might not be set if old-style metafunc.addcall() was used + param_index = funcitem.callspec.indices.get(argname, 0) + # if a parametrize invocation set a scope it will override + # the static scope defined with the fixture function + paramscopenum = funcitem.callspec._arg2scopenum.get(argname) + if paramscopenum is not None: + scope = scopes[paramscopenum] + + subrequest = SubRequest(self, scope, param, param_index, fixturedef) + + # check if a higher-level scoped fixture accesses a lower level one + subrequest._check_scope(argname, self.scope, scope) + + # clear sys.exc_info before invoking the fixture (python bug?) 
+ # if its not explicitly cleared it will leak into the call + exc_clear() + try: + # call the fixture function + fixturedef.execute(request=subrequest) + finally: + # if fixture function failed it might have registered finalizers + self.session._setupstate.addfinalizer( + functools.partial(fixturedef.finish, request=subrequest), + subrequest.node, + ) + + def _check_scope(self, argname, invoking_scope, requested_scope): + if argname == "request": + return + if scopemismatch(invoking_scope, requested_scope): + # try to report something helpful + lines = self._factorytraceback() + fail( + "ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" + % ((requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False, + ) + + def _factorytraceback(self): + lines = [] + for fixturedef in self._get_fixturestack(): + factory = fixturedef.func + fs, lineno = getfslineno(factory) + p = self._pyfuncitem.session.fspath.bestrelpath(fs) + args = _format_args(factory) + lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args)) + return lines + + def _getscopeitem(self, scope): + if scope == "function": + # this might also be a non-function Item despite its attribute name + return self._pyfuncitem + if scope == "package": + node = get_scope_package(self._pyfuncitem, self._fixturedef) + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope == "class": + # fallback to function item itself + node = self._pyfuncitem + assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( + scope, self._pyfuncitem + ) + return node + + def __repr__(self): + return "" % (self.node) + + +class SubRequest(FixtureRequest): + """ a sub request for handling getting a fixture from a + test function/fixture. """ + + def __init__(self, request, scope, param, param_index, fixturedef): + self._parent_request = request + self.fixturename = fixturedef.argname + if param is not NOTSET: + self.param = param + self.param_index = param_index + self.scope = scope + self._fixturedef = fixturedef + self._pyfuncitem = request._pyfuncitem + self._fixture_defs = request._fixture_defs + self._arg2fixturedefs = request._arg2fixturedefs + self._arg2index = request._arg2index + self._fixturemanager = request._fixturemanager + + def __repr__(self): + return "" % (self.fixturename, self._pyfuncitem) + + def addfinalizer(self, finalizer): + self._fixturedef.addfinalizer(finalizer) + + +class ScopeMismatchError(Exception): + """ A fixture function tries to use a different fixture function which + which has a lower scope (e.g. a Session one calls a function one) + """ + + +scopes = "session package module class function".split() +scopenum_function = scopes.index("function") + + +def scopemismatch(currentscope, newscope): + return scopes.index(newscope) > scopes.index(currentscope) + + +def scope2index(scope, descr, where=None): + """Look up the index of ``scope`` and raise a descriptive value error + if not defined. + """ + try: + return scopes.index(scope) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, "from {} ".format(where) if where else "", scope + ), + pytrace=False, + ) + + +class FixtureLookupError(LookupError): + """ could not return a requested Fixture (missing or invalid). 
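The scope checks above are what produce the ``ScopeMismatch`` failure when a longer-lived fixture requests a shorter-lived one; an invented minimal reproduction:

    import pytest

    @pytest.fixture
    def per_test():
        return object()

    @pytest.fixture(scope="session")
    def broken(per_test):
        # a session-scoped fixture may not depend on a function-scoped one;
        # setup fails with "ScopeMismatch: You tried to access the 'function'
        # scoped fixture 'per_test' with a 'session' scoped request object"
        return per_test

    def test_uses_broken(broken):
        pass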
""" + + def __init__(self, argname, request, msg=None): + self.argname = argname + self.request = request + self.fixturestack = request._get_fixturestack() + self.msg = msg + + def formatrepr(self): + tblines = [] + addline = tblines.append + stack = [self.request._pyfuncitem.obj] + stack.extend(map(lambda x: x.func, self.fixturestack)) + msg = self.msg + if msg is not None: + # the last fixture raise an error, let's present + # it at the requesting side + stack = stack[:-1] + for function in stack: + fspath, lineno = getfslineno(function) + try: + lines, _ = inspect.getsourcelines(get_real_func(function)) + except (IOError, IndexError, TypeError): + error_msg = "file %s, line %s: source code not available" + addline(error_msg % (fspath, lineno + 1)) + else: + addline("file %s, line %s" % (fspath, lineno + 1)) + for i, line in enumerate(lines): + line = line.rstrip() + addline(" " + line) + if line.lstrip().startswith("def"): + break + + if msg is None: + fm = self.request._fixturemanager + available = set() + parentid = self.request._pyfuncitem.parent.nodeid + for name, fixturedefs in fm._arg2fixturedefs.items(): + faclist = list(fm._matchfactories(fixturedefs, parentid)) + if faclist: + available.add(name) + if self.argname in available: + msg = " recursive dependency involving fixture '{}' detected".format( + self.argname + ) + else: + msg = "fixture '{}' not found".format(self.argname) + msg += "\n available fixtures: {}".format(", ".join(sorted(available))) + msg += "\n use 'pytest --fixtures [testpath]' for help on them." + + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__(self, filename, firstlineno, tblines, errorstring, argname): + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw): + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), + red=True, + ) + for line in lines[1:]: + tw.line( + "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), + red=True, + ) + tw.line() + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) + + +def fail_fixturefunc(fixturefunc, msg): + fs, lineno = getfslineno(fixturefunc) + location = "%s:%s" % (fs, lineno + 1) + source = _pytest._code.Source(fixturefunc) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) + + +def call_fixture_func(fixturefunc, request, kwargs): + yieldctx = is_generator(fixturefunc) + if yieldctx: + it = fixturefunc(**kwargs) + res = next(it) + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) + request.addfinalizer(finalizer) + else: + res = fixturefunc(**kwargs) + return res + + +def _teardown_yield_fixture(fixturefunc, it): + """Executes the teardown of a fixture function by advancing the iterator after the + yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" + try: + next(it) + except StopIteration: + pass + else: + fail_fixturefunc( + fixturefunc, "yield_fixture function has more than one 'yield'" + ) + + +class FixtureDef(object): + """ A container for a factory definition. 
""" + + def __init__( + self, + fixturemanager, + baseid, + argname, + func, + scope, + params, + unittest=False, + ids=None, + ): + self._fixturemanager = fixturemanager + self.baseid = baseid or "" + self.has_location = baseid is not None + self.func = func + self.argname = argname + self.scope = scope + self.scopenum = scope2index( + scope or "function", + descr="Fixture '{}'".format(func.__name__), + where=baseid, + ) + self.params = params + self.argnames = getfuncargnames(func, is_method=unittest) + self.unittest = unittest + self.ids = ids + self._finalizers = [] + + def addfinalizer(self, finalizer): + self._finalizers.append(finalizer) + + def finish(self, request): + exceptions = [] + try: + while self._finalizers: + try: + func = self._finalizers.pop() + func() + except: # noqa + exceptions.append(sys.exc_info()) + if exceptions: + e = exceptions[0] + del exceptions # ensure we don't keep all frames alive because of the traceback + six.reraise(*e) + + finally: + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # even if finalization fails, we invalidate + # the cached fixture value and remove + # all finalizers because they may be bound methods which will + # keep instances alive + if hasattr(self, "cached_result"): + del self.cached_result + self._finalizers = [] + + def execute(self, request): + # get required arguments and register our own finish() + # with their finalization + for argname in self.argnames: + fixturedef = request._get_active_fixturedef(argname) + if argname != "request": + fixturedef.addfinalizer(functools.partial(self.finish, request=request)) + + my_cache_key = request.param_index + cached_result = getattr(self, "cached_result", None) + if cached_result is not None: + result, cache_key, err = cached_result + if my_cache_key == cache_key: + if err is not None: + six.reraise(*err) + else: + return result + # we have a previous but differently parametrized fixture instance + # so we need to tear it down before creating a new one + self.finish(request) + assert not hasattr(self, "cached_result") + + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + return hook.pytest_fixture_setup(fixturedef=self, request=request) + + def __repr__(self): + return "" % ( + self.argname, + self.scope, + self.baseid, + ) + + +def resolve_fixture_function(fixturedef, request): + """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific + instances and bound methods. + """ + fixturefunc = fixturedef.func + if fixturedef.unittest: + if request.instance is not None: + # bind the unbound method to the TestCase instance + fixturefunc = fixturedef.func.__get__(request.instance) + else: + # the fixture function needs to be bound to the actual + # request.instance so that code working with "fixturedef" behaves + # as expected. + if request.instance is not None: + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) + return fixturefunc + + +def pytest_fixture_setup(fixturedef, request): + """ Execution of fixture setup. 
""" + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request.scope, fixdef.scope) + kwargs[argname] = result + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = request.param_index + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME: + fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def _ensure_immutable_ids(ids): + if ids is None: + return + if callable(ids): + return ids + return tuple(ids) + + +def wrap_function_to_warning_if_called_directly(function, fixture_marker): + """Wrap the given fixture function so we can issue warnings about it being called directly, instead of + used as an argument in a test function. + """ + is_yield_function = is_generator(function) + warning = FIXTURE_FUNCTION_CALL.format( + name=fixture_marker.name or function.__name__ + ) + + if is_yield_function: + + @functools.wraps(function) + def result(*args, **kwargs): + __tracebackhide__ = True + warnings.warn(warning, stacklevel=3) + for x in function(*args, **kwargs): + yield x + + else: + + @functools.wraps(function) + def result(*args, **kwargs): + __tracebackhide__ = True + warnings.warn(warning, stacklevel=3) + return function(*args, **kwargs) + + if six.PY2: + result.__wrapped__ = function + + # keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774) + result.__pytest_wrapped__ = _PytestWrapper(function) + + return result + + +@attr.s(frozen=True) +class FixtureFunctionMarker(object): + scope = attr.ib() + params = attr.ib(converter=attr.converters.optional(tuple)) + autouse = attr.ib(default=False) + ids = attr.ib(default=None, converter=_ensure_immutable_ids) + name = attr.ib(default=None) + + def __call__(self, function): + if isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_warning_if_called_directly(function, self) + + function._pytestfixturefunction = self + return function + + +def fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test + modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` + marker. + + Test functions can directly use fixture names as input + arguments in which case the fixture instance returned from the fixture + function will be injected. + + Fixtures can provide their values to test functions using ``return`` or ``yield`` + statements. When using ``yield`` the code block after the ``yield`` statement is executed + as teardown code regardless of the test outcome, and must yield exactly once. + + :arg scope: the scope for which this fixture is shared, one of + ``"function"`` (default), ``"class"``, ``"module"``, + ``"package"`` or ``"session"``. + + ``"package"`` is considered **experimental** at this time. 
+ + :arg params: an optional list of parameters which will cause multiple + invocations of the fixture function and all of the tests + using it. + + :arg autouse: if True, the fixture func is activated for all tests that + can see it. If False (the default) then an explicit + reference is needed to activate the fixture. + + :arg ids: list of string ids each corresponding to the params + so that they are part of the test id. If no ids are provided + they will be generated automatically from the params. + + :arg name: the name of the fixture. This defaults to the name of the + decorated function. If a fixture is used in the same module in + which it is defined, the function name of the fixture will be + shadowed by the function arg that requests the fixture; one way + to resolve this is to name the decorated function + ``fixture_`` and then use + ``@pytest.fixture(name='')``. + """ + if callable(scope) and params is None and autouse is False: + # direct decoration + return FixtureFunctionMarker("function", params, autouse, name=name)(scope) + if params is not None and not isinstance(params, (list, tuple)): + params = list(params) + return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) + + +def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): + """ (return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name) + + +defaultfuncargprefixmarker = fixture() + + +@fixture(scope="session") +def pytestconfig(request): + """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.getoption("verbose"): + ... + + """ + return request.config + + +class FixtureManager(object): + """ + pytest fixtures definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i. e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. 
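To make the docstring above concrete, a sketch of where the "initial" fixture names can come from in a single test module (all names invented); the closure computed later adds whatever these fixtures themselves require:

    import pytest

    @pytest.fixture(autouse=True)
    def reset_state():
        # autouse: contributes itself to every test collected below it
        yield

    @pytest.fixture
    def db():
        return {}

    @pytest.fixture
    def cache(db):
        # "db" stays in the closure because "cache" requires it
        return {"backing": db}

    @pytest.mark.usefixtures("cache")
    def test_query(db):
        # initial names for this test: the autouse fixture ("reset_state"),
        # the usefixtures marker ("cache") and the direct funcarg ("db")
        assert db == {}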
+ """ + + _argprefix = "pytest_funcarg__" + FixtureLookupError = FixtureLookupError + FixtureLookupErrorRepr = FixtureLookupErrorRepr + + def __init__(self, session): + self.session = session + self.config = session.config + self._arg2fixturedefs = {} + self._holderobjseen = set() + self._arg2finish = {} + self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo(self, node, func, cls, funcargs=True): + if funcargs and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, cls=cls) + else: + argnames = () + usefixtures = flatten( + mark.args for mark in node.iter_markers(name="usefixtures") + ) + initialnames = tuple(usefixtures) + argnames + fm = node.session._fixturemanager + initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( + initialnames, node + ) + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin): + nodeid = None + try: + p = py.path.local(plugin.__file__).realpath() + except AttributeError: + pass + else: + # construct the base nodeid which is later used to check + # what fixtures are visible for particular tests (as denoted + # by their test id) + if p.basename.startswith("conftest.py"): + nodeid = p.dirpath().relto(self.config.rootdir) + if p.sep != nodes.SEP: + nodeid = nodeid.replace(p.sep, nodes.SEP) + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, nodeid): + """ return a tuple of fixture names to be used. """ + autousenames = [] + for baseid, basenames in self._nodeid_and_autousenames: + if nodeid.startswith(baseid): + if baseid: + i = len(baseid) + nextchar = nodeid[i : i + 1] + if nextchar and nextchar not in ":/": + continue + autousenames.extend(basenames) + return autousenames + + def getfixtureclosure(self, fixturenames, parentnode): + # collect the closure of all fixtures , starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive) + + parentid = parentnode.nodeid + fixturenames_closure = self._getautousenames(parentid) + + def merge(otherlist): + for arg in otherlist: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + merge(fixturenames) + + # at this point, fixturenames_closure contains what we call "initialnames", + # which is a set of fixturenames the function immediately requests. We + # need to return it as well, so save this. 
+ initialnames = tuple(fixturenames_closure) + + arg2fixturedefs = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentid) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name): + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return scopes.index("function") + else: + return fixturedefs[-1].scopenum + + fixturenames_closure.sort(key=sort_by_scope) + return initialnames, fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc): + for argname in metafunc.fixturenames: + faclist = metafunc._arg2fixturedefs.get(argname) + if faclist: + fixturedef = faclist[-1] + if fixturedef.params is not None: + parametrize_func = getattr(metafunc.function, "parametrize", None) + if parametrize_func is not None: + parametrize_func = parametrize_func.combined + func_params = getattr(parametrize_func, "args", [[None]]) + func_kwargs = getattr(parametrize_func, "kwargs", {}) + # skip directly parametrized arguments + if "argnames" in func_kwargs: + argnames = parametrize_func.kwargs["argnames"] + else: + argnames = func_params[0] + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + if argname not in func_params and argname not in argnames: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + else: + continue # will raise FixtureLookupError at setup time + + def pytest_collection_modifyitems(self, items): + # separate parametrized setups + items[:] = reorder_items(items) + + def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): + from _pytest import deprecated + + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + holderobj = node_or_obj.obj + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. 
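The fixture-parametrization pass above cooperates with the public ``pytest_generate_tests`` hook; a minimal conftest-style sketch of the user-facing side (the argument name and values are invented):

    def pytest_generate_tests(metafunc):
        # parametrize any test that requests an "endpoint" argument
        if "endpoint" in metafunc.fixturenames:
            metafunc.parametrize("endpoint", ["/health", "/status"])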
+ obj = safe_getattr(holderobj, name, None) + marker = getfixturemarker(obj) + # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) + # or are "@pytest.fixture" marked + if marker is None: + if not name.startswith(self._argprefix): + continue + if not callable(obj): + continue + marker = defaultfuncargprefixmarker + + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + deprecated.FUNCARG_PREFIX.format(name=name), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + name = name[len(self._argprefix) :] + elif not isinstance(marker, FixtureFunctionMarker): + # magic globals with __getattr__ might have got us a wrong + # fixture attribute + continue + else: + if marker.name: + name = marker.name + assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name) + + # during fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in order to not emit the warning + # when pytest itself calls the fixture function + if six.PY2 and unittest: + # hack on Python 2 because of the unbound methods + obj = get_real_func(obj) + else: + obj = get_real_method(obj, holderobj) + + fixture_def = FixtureDef( + self, + nodeid, + name, + obj, + marker.scope, + marker.params, + unittest=unittest, + ids=marker.ids, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_and_autousenames.append((nodeid or "", autousenames)) + + def getfixturedefs(self, argname, nodeid): + """ + Gets a list of fixtures which are applicable to the given node id. + + :param str argname: name of the fixture to search for + :param str nodeid: full node id of the requesting test. 
+ :return: list[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories(self, fixturedefs, nodeid): + for fixturedef in fixturedefs: + if nodes.ischildnode(fixturedef.baseid, nodeid): + yield fixturedef diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31801dea5637735780d1d5f3fb7ab1116e1df6d9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/fixtures.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000000000000000000000000000000000000..5edf3454cd97287f1ee5a6155bafb9c3e56cb699 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.py @@ -0,0 +1,47 @@ +""" +Provides a function to report all internal modules for using freezing tools +pytest +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def freeze_includes(): + """ + Returns a list of module names used by pytest that should be + included by cx_freeze. + """ + import py + import _pytest + + result = list(_iter_all_modules(py)) + result += list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules(package, prefix=""): + """ + Iterates over the names of all modules that can be found in the given + package, recursively. + Example: + _iter_all_modules(_pytest) -> + ['_pytest.assertion.newinterpret', + '_pytest.capture', + '_pytest.core', + ... + ] + """ + import os + import pkgutil + + if type(package) is not str: + path, prefix = package.__path__[0], package.__name__ + "." + else: + path = package + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55f995e8bf51e1e2fab3f1a2a3dca49319bf9242 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/freeze_support.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..5e60d2a7f9403f834133bc0248f527151330bb53 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.py @@ -0,0 +1,217 @@ +""" version info, help messages, tracing configuration. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +from argparse import Action + +import py + +import pytest +from _pytest.config import PrintHelp + + +class HelpAction(Action): + """This is an argparse Action that will raise an exception in + order to skip the rest of the argument parsing when --help is passed. 
+ This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super(HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + action="store_true", + help="display pytest lib version and import information.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="trace considerations of conftest.py files.", + ), + group.addoption( + "--debug", + action="store_true", + dest="debug", + default=False, + help="store internal tracing debug information in 'pytestdebug.log'.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.', + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config = outcome.get_result() + if config.option.debug: + path = os.path.abspath("pytestdebug.log") + debugfile = open(path, "w") + debugfile.write( + "versions pytest-%s, py-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config._origargs, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytestdebug information to %s\n" % path) + + def unset_tracing(): + debugfile.close() + sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def pytest_cmdline_main(config): + if config.option.version: + p = py.path.local(pytest.__file__) + sys.stderr.write( + "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stderr.write(line + "\n") + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + + +def showhelp(config): + reporter = config.pluginmanager.get_plugin("terminalreporter") + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line() + tw.line( + "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" + ) + tw.line() + + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + spec = "%s (%s)" % (name, type) + line = " %-24s 
%s" % (spec, help) + tw.line(line[: tw.fullwidth]) + + tw.line() + tw.line("environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "extra command line options"), + ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(" %-24s %s" % (name, help)) + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config): + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = "%s-%s at %s" % (dist.project_name, dist.version, loc) + lines.append(" " + content) + return lines + + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(" %-20s: %s" % (name, r)) + return lines diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e10ead19a8c255e5c8ddbee92e147c431d9cc206 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/helpconfig.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000000000000000000000000000000000000..27e55f0ea7e0e831d878898c62d1828a49a65931 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.py @@ -0,0 +1,611 @@ +""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ +from pluggy import HookspecMarker + +from .deprecated import PYTEST_NAMESPACE + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager): + """called at plugin registration time to allow adding new hooks via a call to + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. 
+ """ + + +@hookspec(historic=True, warn_on_impl=PYTEST_NAMESPACE) +def pytest_namespace(): + """ + return dict of name->object to be made globally available in + the pytest namespace. + + This hook is called at plugin registration time. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + .. warning:: + This hook has been **deprecated** and will be removed in pytest 4.0. + + Plugins whose users depend on the current namespace functionality should prepare to migrate to a + namespace they actually own. + + To support the migration its suggested to trigger ``DeprecationWarnings`` for objects they put into the + pytest namespace. + + An stopgap measure to avoid the warning is to monkeypatch the ``pytest`` module, but just as the + ``pytest_namespace`` hook this should be seen as a temporary measure to be removed in future versions after + an appropriate transition period. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered(plugin, manager): + """ a new pytest plugin got registered. + + :param plugin: the plugin module or instance + :param _pytest.config.PytestPluginManager manager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser): + """register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + :arg _pytest.config.Parser parser: To add command line options, call + :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. + To add ini-file values call :py:func:`parser.addini(...) + <_pytest.config.Parser.addini>`. + + Options can later be accessed through the + :py:class:`config <_pytest.config.Config>` object, respectively: + + - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_configure(config): + """ + Allows plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :arg _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse(pluginmanager, args): + """return initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. 
+ + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + :param list[str] args: list of arguments passed on the command line + """ + + +def pytest_cmdline_preparse(config, args): + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :func:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config config: pytest config object + :param list[str] args: list of arguments passed on the command line + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config): + """ called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + """ + + +def pytest_load_initial_conftests(early_config, parser, args): + """ implements the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config early_config: pytest config object + :param list[str] args: list of arguments passed on the command line + :param _pytest.config.Parser parser: to add command line options + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session): + """Perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_collection_modifyitems(session, config, items): + """ called after collection has been performed, may filter or re-order + the items in-place. + + :param _pytest.main.Session session: the pytest session object + :param _pytest.config.Config config: pytest config object + :param List[_pytest.nodes.Item] items: list of item objects + """ + + +def pytest_collection_finish(session): + """ called after collection has been performed and modified. + + :param _pytest.main.Session session: the pytest session object + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect(path, config): + """ return True to prevent considering this path for collection. + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + :param _pytest.config.Config config: pytest config object + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path, parent): + """ called before traversing a directory for collection files. + + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + """ + + +def pytest_collect_file(path, parent): + """ return collection Node or None for the given path. Any new node + needs to have the specified ``parent`` as a parent. + + :param str path: the path to collect + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector): + """ collector starts collecting. 
""" + + +def pytest_itemcollected(item): + """ we just collected a test item. """ + + +def pytest_collectreport(report): + """ collector finished collecting. """ + + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector): + """ perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule(path, parent): + """ return a Module collector or None for the given path. + This hook will be called for each matching test module path. + The pytest_collect_file hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult` """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config, val, argname): + """Return a user-friendly string representation of the given ``val`` that will be used + by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + :param val: the parametrized value + :param str argname: the automatic parameter name produced by pytest + """ + + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session): + """ called for performing the main runtest loop + (after collection finished). + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_itemstart(item, node): + """(**Deprecated**) use pytest_runtest_logstart. """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item, nextitem): + """ implements the runtest_setup/call/teardown protocol for + the given test item, including capturing exceptions and calling + reporting hooks. + + :arg item: test item for which the runtest protocol is performed. + + :arg nextitem: the scheduled-to-be-next test item (or None if this + is the end my friend). This argument is passed on to + :py:func:`pytest_runtest_teardown`. + + :return boolean: True if no further hook implementations should be invoked. + + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logstart(nodeid, location): + """ signal the start of running a single test item. + + This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. 
+ + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_logfinish(nodeid, location): + """ signal the complete finish of running a single test item. + + This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_setup(item): + """ called before ``pytest_runtest_call(item)``. """ + + +def pytest_runtest_call(item): + """ called to execute the test ``item``. """ + + +def pytest_runtest_teardown(item, nextitem): + """ called after ``pytest_runtest_call``. + + :arg nextitem: the scheduled-to-be-next test item (None if no further + test item is scheduled). This argument can be used to + perform exact teardowns, i.e. calling just enough finalizers + so that nextitem only needs to call setup-functions. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item, call): + """ return a :py:class:`_pytest.runner.TestReport` object + for the given :py:class:`pytest.Item <_pytest.main.Item>` and + :py:class:`_pytest.runner.CallInfo`. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_runtest_logreport(report): + """ process a test setup/call/teardown report relating to + the respective phase of executing a test. """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup(fixturedef, request): + """ performs fixture setup execution. + + :return: The return value of the call to the fixture function + + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer(fixturedef, request): + """ called after fixture teardown, but before the cache is cleared so + the fixture result cache ``fixturedef.cached_result`` can + still be accessed.""" + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session): + """ called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param _pytest.main.Session session: the pytest session object + """ + + +def pytest_sessionfinish(session, exitstatus): + """ called after whole test run finished, right before returning the exit status to the system. + + :param _pytest.main.Session session: the pytest session object + :param int exitstatus: the status which pytest will return to the system + """ + + +def pytest_unconfigure(config): + """ called before test process is exited. + + :param _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare(config, op, left, right): + """return explanation for comparisons in failing assert expressions. 
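And a small sketch of the session-level hooks documented above, again as they might appear in a ``conftest.py`` (storing state on the session object is an illustrative convention, not a pytest API):

    import time

    def pytest_sessionstart(session):
        session.started_at = time.time()

    def pytest_sessionfinish(session, exitstatus):
        elapsed = time.time() - session.started_at
        print("session finished in %.1fs with exit status %d"
              % (elapsed, exitstatus))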
+ + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param _pytest.config.Config config: pytest config object + """ + + +# ------------------------------------------------------------------------- +# hooks for influencing reporting (invoked from _pytest_terminal) +# ------------------------------------------------------------------------- + + +def pytest_report_header(config, startdir): + """ return a string or list of strings to be displayed as header info for terminal reporting. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + """ + + +def pytest_report_collectionfinish(config, startdir, items): + """ + .. versionadded:: 3.2 + + return a string or list of strings to be displayed after collection has finished successfully. + + This strings will be displayed after the standard "collected X items" message. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir + :param items: list of pytest items that are going to be executed; this list should not be modified. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus(report): + """ return result-category, shortletter and verbose word for reporting. + + Stops at first non-None result, see :ref:`firstresult` """ + + +def pytest_terminal_summary(terminalreporter, exitstatus): + """Add a section to terminal summary reporting. + + :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object + :param int exitstatus: the exit status that will be reported back to the OS + + .. versionadded:: 3.5 + The ``config`` parameter. + """ + + +@hookspec(historic=True) +def pytest_logwarning(message, code, nodeid, fslocation): + """ + .. deprecated:: 3.8 + + This hook is will stop working in a future release. + + pytest no longer triggers this hook, but the + terminal writer still implements it to display warnings issued by + :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be + an error in future releases. + + process a warning specified by a message, a code string, + a nodeid and fslocation (both of which may be None + if the warning is not tied to a particular node/location). + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ + + +@hookspec(historic=True) +def pytest_warning_captured(warning_message, when, item): + """ + Process a warning captured by the internal pytest warnings plugin. + + :param warnings.WarningMessage warning_message: + The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param str when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. 
+ + :param pytest.Item|None item: + **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None`` + in a future release. + + The item being executed if ``when`` is ``"runtest"``, otherwise ``None``. + """ + + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest + + Stops at first non-None result, see :ref:`firstresult` """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror(excrepr, excinfo): + """ called for internal errors. """ + + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ + + +def pytest_exception_interact(node, call, report): + """called when an exception was raised which can potentially be + interactively handled. + + This hook is only called if an exception was raised + that is not an internal exception like ``skip.Exception``. + """ + + +def pytest_enter_pdb(config): + """ called upon pdb.set_trace(), can be used by plugins to take special + action just before the python debugger enters in interactive mode. + + :param _pytest.config.Config config: pytest config object + """ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5c3e37cf38877a400a8096dd150196e34c9d685 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/hookspec.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000000000000000000000000000000000000..09847c942da7dce9c403e8cd751f9b25bc9fb731 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.py @@ -0,0 +1,569 @@ +""" + report test results in JUnit-XML format, + for use with Jenkins and build integration servers. + + +Based on initial code from Ross Lawley. + +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import re +import sys +import time + +import py + +import pytest +from _pytest import nodes +from _pytest.config import filename_arg + +# Python 2.X and 3.X compatibility +if sys.version_info[0] < 3: + from codecs import open +else: + unichr = chr + unicode = str + long = int + + +class Junit(py.xml.Namespace): + pass + + +# We need to get the subset of the invalid unicode ranges according to +# XML 1.0 which are valid in this python build. Hence we calculate +# this dynamically instead of hardcoding it. 
The spec range of valid +# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] +# | [#x10000-#x10FFFF] +_legal_chars = (0x09, 0x0A, 0x0D) +_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) +_legal_xml_re = [ + unicode("%s-%s") % (unichr(low), unichr(high)) + for (low, high) in _legal_ranges + if low < sys.maxunicode +] +_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re +illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re)) +del _legal_chars +del _legal_ranges +del _legal_xml_re + +_py_ext_re = re.compile(r"\.py$") + + +def bin_xml_escape(arg): + def repl(matchobj): + i = ord(matchobj.group()) + if i <= 0xFF: + return unicode("#x%02X") % i + else: + return unicode("#x%04X") % i + + return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) + + +class _NodeReporter(object): + def __init__(self, nodeid, xml): + + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.duration = 0 + self.properties = [] + self.nodes = [] + self.testcase = None + self.attrs = {} + + def append(self, node): + self.xml.add_stats(type(node).__name__) + self.nodes.append(node) + + def add_property(self, name, value): + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name, value): + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.properties + ] + ) + return "" + + def record_testreport(self, testreport): + assert not self.testcase + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # restore any user-defined attributes + + def to_xml(self): + testcase = Junit.testcase(time=self.duration, **self.attrs) + testcase.append(self.make_properties_node()) + for node in self.nodes: + testcase.append(node) + return testcase + + def _add_simple(self, kind, message, data=None): + data = bin_xml_escape(data) + node = kind(data, message=message) + self.append(node) + + def write_captured_output(self, report): + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + + if content_log or content_out: + if content_log and self.xml.logging == "system-out": + if content_out: + # syncing stdout and the log-output is not done yet. It's + # probably not worth the effort. Therefore, first the captured + # stdout is shown and then the captured logs. 
+ content = "\n".join( + [ + " Captured Stdout ".center(80, "-"), + content_out, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) + else: + content = content_log + else: + content = content_out + + if content: + tag = getattr(Junit, "system-out") + self.append(tag(bin_xml_escape(content))) + + if content_log or content_err: + if content_log and self.xml.logging == "system-err": + if content_err: + content = "\n".join( + [ + " Captured Stderr ".center(80, "-"), + content_err, + "", + " Captured Log ".center(80, "-"), + content_log, + ] + ) + else: + content = content_log + else: + content = content_err + + if content: + tag = getattr(Junit, "system-err") + self.append(tag(bin_xml_escape(content))) + + def append_pass(self, report): + self.add_stats("passed") + + def append_failure(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly") + else: + if hasattr(report.longrepr, "reprcrash"): + message = report.longrepr.reprcrash.message + elif isinstance(report.longrepr, (unicode, str)): + message = report.longrepr + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + fail = Junit.failure(message=message) + fail.append(bin_xml_escape(report.longrepr)) + self.append(fail) + + def append_collect_error(self, report): + # msg = str(report.longrepr.reprtraceback.extraline) + self.append( + Junit.error(bin_xml_escape(report.longrepr), message="collection failure") + ) + + def append_collect_skipped(self, report): + self._add_simple(Junit.skipped, "collection skipped", report.longrepr) + + def append_error(self, report): + if getattr(report, "when", None) == "teardown": + msg = "test teardown failure" + else: + msg = "test setup failure" + self._add_simple(Junit.error, msg, report.longrepr) + + def append_skipped(self, report): + if hasattr(report, "wasxfail"): + self._add_simple(Junit.skipped, "expected test failure", report.wasxfail) + else: + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = "%s:%s: %s" % (filename, lineno, skipreason) + + self.append( + Junit.skipped( + bin_xml_escape(details), + type="pytest.skip", + message=bin_xml_escape(skipreason), + ) + ) + self.write_captured_output(report) + + def finalize(self): + data = self.to_xml().unicode(indent=0) + self.__dict__.clear() + self.to_xml = lambda: py.xml.raw(data) + + +@pytest.fixture +def record_property(request): + """Add an extra properties the calling test. + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + The fixture is callable with ``(name, value)``, with value being automatically + xml-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + + def append_property(name, value): + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_property(record_property, request): + """(Deprecated) use record_property.""" + from _pytest import deprecated + + request.node.warn(deprecated.RECORD_XML_PROPERTY) + + return record_property + + +@pytest.fixture +def record_xml_attribute(request): + """Add extra xml attributes to the tag for the calling test. 
+ The fixture is callable with ``(name, value)``, with value being + automatically xml-encoded + """ + from _pytest.warning_types import PytestWarning + + request.node.warn(PytestWarning("record_xml_attribute is an experimental feature")) + xml = getattr(request.config, "_xml", None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + return node_reporter.add_attribute + else: + + def add_attr_noop(name, value): + pass + + return add_attr_noop + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="create junit-xml style report file at given path.", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|system-out|system-err", + default="no", + ) # choices=['no', 'stdout', 'stderr']) + + +def pytest_configure(config): + xmlpath = config.option.xmlpath + # prevent opening xmllog on slave nodes (xdist) + if xmlpath and not hasattr(config, "slaveinput"): + config._xml = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + ) + config.pluginmanager.register(config._xml) + + +def pytest_unconfigure(config): + xml = getattr(config, "_xml", None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address): + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + try: + names.remove("()") + except ValueError: + pass + # convert file path to dotted path + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = _py_ext_re.sub("", names[0]) + # put any params back + names[-1] += possible_open_bracket + params + return names + + +class LogXML(object): + def __init__(self, logfile, prefix, suite_name="pytest", logging="no"): + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0) + self.node_reporters = {} # nodeid -> _NodeReporter + self.node_reporters_ordered = [] + self.global_properties = [] + # List of reports that failed on call but teardown is pending. 
+ self.open_reports = [] + self.cnt_double_fail_tests = 0 + + def finalize(self, report): + nodeid = getattr(report, "nodeid", report) + # local hack to handle xdist report order + slavenode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, slavenode)) + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report): + nodeid = getattr(report, "nodeid", report) + # local hack to handle xdist report order + slavenode = getattr(report, "node", None) + + key = nodeid, slavenode + + if key in self.node_reporters: + # TODO: breasks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key): + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report): + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report): + """handle a setup/call/teardown report, generating the appropriate + xml tags as necessary. + + note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. for example: + + usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema + self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, propvalue) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report): + """accumulates total duration for nodeid from given report and updates + the Junit.testcase with the new total if already created. 
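For example (illustrative figures): if the setup, call and teardown reports of a test carry durations of 0.25, 0.5 and 0.25 seconds, the reporter accumulates them and the generated <testcase> element records a total time of 1.0.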
+ """ + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report): + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr): + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple(Junit.error, "internal error", excrepr) + + def pytest_sessionstart(self): + self.suite_start_time = time.time() + + def pytest_sessionfinish(self): + dirname = os.path.dirname(os.path.abspath(self.logfile)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(self.logfile, "w", encoding="utf-8") + suite_stop_time = time.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('') + + logfile.write( + Junit.testsuite( + self._get_global_properties_node(), + [x.to_xml() for x in self.node_reporters_ordered], + name=self.suite_name, + errors=self.stats["error"], + failures=self.stats["failure"], + skips=self.stats["skipped"], + tests=numtests, + time="%.3f" % suite_time_delta, + ).unicode(indent=0) + ) + logfile.close() + + def pytest_terminal_summary(self, terminalreporter): + terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) + + def add_global_property(self, name, value): + self.global_properties.append((str(name), bin_xml_escape(value))) + + def _get_global_properties_node(self): + """Return a Junit node containing custom properties, if any. + """ + if self.global_properties: + return Junit.properties( + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] + ) + return "" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93bae1b30543a549752eaab732f50cfab43a05c6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/junitxml.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..45fd5950decf366e623523abffbcfcee088e3d80 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.py @@ -0,0 +1,625 @@ +""" Access and control log capturing. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import re +from contextlib import contextmanager + +import py +import six + +import pytest +from _pytest.compat import dummy_context_manager +from _pytest.config import create_terminal_writer + + +DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" + + +class ColoredLevelFormatter(logging.Formatter): + """ + Colorize the %(levelname)..s part of the log format passed to __init__. 
+ """ + + LOGLEVEL_COLOROPTS = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)") + + def __init__(self, terminalwriter, *args, **kwargs): + super(ColoredLevelFormatter, self).__init__(*args, **kwargs) + if six.PY2: + self._original_fmt = self._fmt + else: + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping = {} + + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + formatted_levelname = levelname_fmt % { + "levelname": logging.getLevelName(level) + } + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record): + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + if six.PY2: + self._fmt = fmt + else: + self._style._fmt = fmt + return super(ColoredLevelFormatter, self).format(record) + + +def get_option_ini(config, *names): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser): + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--no-print-logs", + dest="log_print", + action="store_const", + const=False, + default=True, + type="bool", + help="disable printing caught logs on failed tests.", + ) + add_option_ini( + "--log-level", + dest="log_level", + default=None, + help="logging level used by the logging module", + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='enable log display during test run (also known as "live logging").', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." 
+ ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="log date format as used by the logging module.", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="path to a file when logging will be written to.", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="log file logging level.", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=DEFAULT_LOG_FORMAT, + help="log format as used by the logging module.", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="log date format as used by the logging module.", + ) + + +@contextmanager +def catching_logs(handler, formatter=None, level=None): + """Context manager that prepares the whole logging machinery properly.""" + root_logger = logging.getLogger() + + if formatter is not None: + handler.setFormatter(formatter) + if level is not None: + handler.setLevel(level) + + # Adding the same handler twice would confuse logging system. + # Just don't do that. + add_new_handler = handler not in root_logger.handlers + + if add_new_handler: + root_logger.addHandler(handler) + if level is not None: + orig_level = root_logger.level + root_logger.setLevel(min(orig_level, level)) + try: + yield handler + finally: + if level is not None: + root_logger.setLevel(orig_level) + if add_new_handler: + root_logger.removeHandler(handler) + + +class LogCaptureHandler(logging.StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self): + """Creates a new log handler.""" + logging.StreamHandler.__init__(self, py.io.TextIO()) + self.records = [] + + def emit(self, record): + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + logging.StreamHandler.emit(self, record) + + def reset(self): + self.records = [] + self.stream = py.io.TextIO() + + +class LogCaptureFixture(object): + """Provides access and control of log capturing.""" + + def __init__(self, item): + """Creates a new funcarg.""" + self._item = item + # dict of log name -> log level + self._initial_log_levels = {} # type: Dict[str, int] + + def _finalize(self): + """Finalizes the fixture. + + This restores the log levels changed by :meth:`set_level`. + """ + # restore log levels + for logger_name, level in self._initial_log_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + + @property + def handler(self): + """ + :rtype: LogCaptureHandler + """ + return self._item.catch_log_handler + + def get_records(self, when): + """ + Get the logging records for one of the possible test phases. + + :param str when: + Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". + + :rtype: List[logging.LogRecord] + :return: the list of captured records at the given stage + + .. 
versionadded:: 3.4 + """ + handler = self._item.catch_log_handlers.get(when) + if handler: + return handler.records + else: + return [] + + @property + def text(self): + """Returns the log text.""" + return self.handler.stream.getvalue() + + @property + def records(self): + """Returns the list of log records.""" + return self.handler.records + + @property + def record_tuples(self): + """Returns a list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self): + """Returns a list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list + are all interpolated. + Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with + levels, timestamps, etc, making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments + to the logging functions) is not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self): + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level, logger=None): + """Sets the level for capturing of logs. The level will be restored to its previous value at the end of + the test. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be restored to their initial values at the + end of the test. + """ + logger_name = logger + logger = logging.getLogger(logger_name) + # save the original log-level to restore it during teardown + self._initial_log_levels.setdefault(logger_name, logger.level) + logger.setLevel(level) + + @contextmanager + def at_level(self, level, logger=None): + """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the + level is restored to its original value. + + :param int level: the logger to level. + :param str logger: the logger to update the level. If not given, the root logger level is updated. + """ + logger = logging.getLogger(logger) + orig_level = logger.level + logger.setLevel(level) + try: + yield + finally: + logger.setLevel(orig_level) + + +@pytest.fixture +def caplog(request): + """Access and control log capturing. 
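A hypothetical test using the fixture (the logger name and message are assumptions)::

    import logging

    def test_warning_is_logged(caplog):
        logging.getLogger("myapp").warning("disk almost full")
        assert ("myapp", logging.WARNING, "disk almost full") in caplog.record_tuples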
+ + Captured logs are available through the following properties/methods:: + + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node) + yield result + result._finalize() + + +def get_actual_log_level(config, *setting_names): + """Return the actual logging level.""" + + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return + + if isinstance(log_level, six.string_types): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError: + # Python logging does not recognise this as a logging level + raise pytest.UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) + + +def pytest_configure(config): + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin(object): + """Attaches to the logging module and captures log messages for each test. + """ + + def __init__(self, config): + """Creates a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # enable verbose output automatically if live logging is enabled + if self._log_cli_enabled() and not config.getoption("verbose"): + # sanity check: terminal reporter should not have been loaded at this point + assert self._config.pluginmanager.get_plugin("terminalreporter") is None + config.option.verbose = 1 + + self.print_logs = get_option_ini(config, "log_print") + self.formatter = logging.Formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + ) + self.log_level = get_actual_log_level(config, "log_level") + + log_file = get_option_ini(config, "log_file") + if log_file: + self.log_file_level = get_actual_log_level(config, "log_file_level") + + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + # Each pytest runtests session will write to a clean logfile + self.log_file_handler = logging.FileHandler( + log_file, mode="w", encoding="UTF-8" + ) + log_file_formatter = logging.Formatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + else: + self.log_file_handler = None + + self.log_cli_handler = None + + def _log_cli_enabled(self): + """Return True if log_cli should be considered enabled, either explicitly + or because --log-cli-level was given in the command-line. + """ + return self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self): + # This has to be called before the first log message is logged, + # so we can access the terminal reporter plugin. 
+ self._setup_cli_logging() + + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("collection") + + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @contextmanager + def _runtest_for(self, item, when): + """Implements the internals of pytest_runtest_xxx() hook.""" + with catching_logs( + LogCaptureHandler(), formatter=self.formatter, level=self.log_level + ) as log_handler: + if self.log_cli_handler: + self.log_cli_handler.set_when(when) + + if item is None: + yield # run the test + return + + if not hasattr(item, "catch_log_handlers"): + item.catch_log_handlers = {} + item.catch_log_handlers[when] = log_handler + item.catch_log_handler = log_handler + try: + yield # run test + finally: + if when == "teardown": + del item.catch_log_handler + del item.catch_log_handlers + + if self.print_logs: + # Add a captured log section to the report. + log = log_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self._runtest_for(item, "setup"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self._runtest_for(item, "call"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self._runtest_for(item, "teardown"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logstart(self): + if self.log_cli_handler: + self.log_cli_handler.reset() + with self._runtest_for(None, "start"): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logfinish(self): + with self._runtest_for(None, "finish"): + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self): + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionfinish") + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self): + self._setup_cli_logging() + with self.live_logs_context(): + if self.log_cli_handler: + self.log_cli_handler.set_when("sessionstart") + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + else: + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session): + """Runs all collected test items.""" + with self.live_logs_context(): + if self.log_file_handler is not None: + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # run all the tests + else: + yield # run all the tests + + def _setup_cli_logging(self): + """Sets up the handler and logger for the Live Logs feature, if enabled.""" + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if self._log_cli_enabled() and terminal_reporter is not None: + capture_manager = self._config.pluginmanager.get_plugin("capturemanager") + log_cli_handler = _LiveLoggingStreamHandler( + terminal_reporter, capture_manager + ) + log_cli_format = get_option_ini( + self._config, "log_cli_format", "log_format" + ) + log_cli_date_format = get_option_ini( + self._config, "log_cli_date_format", "log_date_format" + ) + if ( + self._config.option.color != "no" + and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format) + ): + log_cli_formatter = 
ColoredLevelFormatter( + create_terminal_writer(self._config), + log_cli_format, + datefmt=log_cli_date_format, + ) + else: + log_cli_formatter = logging.Formatter( + log_cli_format, datefmt=log_cli_date_format + ) + log_cli_level = get_actual_log_level( + self._config, "log_cli_level", "log_level" + ) + self.log_cli_handler = log_cli_handler + self.live_logs_context = lambda: catching_logs( + log_cli_handler, formatter=log_cli_formatter, level=log_cli_level + ) + else: + self.live_logs_context = lambda: dummy_context_manager() + # Note that the lambda for the live_logs_context is needed because + # live_logs_context can otherwise not be entered multiple times due + # to limitations of contextlib.contextmanager + + +class _LiveLoggingStreamHandler(logging.StreamHandler): + """ + Custom StreamHandler used by the live logging feature: it will write a newline before the first log message + in each test. + + During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured + and won't appear in the terminal. + """ + + def __init__(self, terminal_reporter, capture_manager): + """ + :param _pytest.terminal.TerminalReporter terminal_reporter: + :param _pytest.capture.CaptureManager capture_manager: + """ + logging.StreamHandler.__init__(self, stream=terminal_reporter) + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self): + """Reset the handler; should be called before the start of each test""" + self._first_record_emitted = False + + def set_when(self, when): + """Prepares for the given test phase (setup/call/teardown)""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record): + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else dummy_context_manager() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + logging.StreamHandler.emit(self, record) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0349b04e2991d7a955cfadd1dad9be8758f1e7e3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/logging.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f27270f262c099b4503a469264eb3ac74d1b7a96 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.py @@ -0,0 +1,672 @@ +""" core implementation of testing process: init, session, runtest loop. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import functools +import os +import pkgutil +import sys + +import py +import six + +import _pytest._code +from _pytest import nodes +from _pytest.config import directory_arg +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.outcomes import exit +from _pytest.runner import collect_one_node + + +# exitcodes for the command line +EXIT_OK = 0 +EXIT_TESTSFAILED = 1 +EXIT_INTERRUPTED = 2 +EXIT_INTERNALERROR = 3 +EXIT_USAGEERROR = 4 +EXIT_NOTESTSCOLLECTED = 5 + + +def pytest_addoption(parser): + parser.addini( + "norecursedirs", + "directory patterns to avoid for recursion", + type="args", + default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"], + ) + parser.addini( + "testpaths", + "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", + default=[], + ) + # parser.addini("dirpatterns", + # "patterns specifying possible locations of test files", + # type="linelist", default=["**/test_*.txt", + # "**/test_*.py", "**/*_test.py"] + # ) + group = parser.getgroup("general", "running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="exit instantly on first error or failed test.", + ), + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="exit after first num failures or errors.", + ) + group._addoption( + "--strict", + action="store_true", + help="marks not registered in configuration file raise errors.", + ) + group._addoption( + "-c", + metavar="file", + type=str, + dest="inifilename", + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + action="store_true", + help="only collect tests, don't execute them.", + ), + group.addoption( + "--pyargs", + action="store_true", + help="try to interpret all arguments as python packages.", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="ignore path during collection (multi-allowed).", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="deselect item during collection (multi-allowed).", + ) + # when changing this to --conf-cut-dir, config.py Conftest.setinitial + # needs upgrading as well + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files.", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests.", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + metavar="dir", + help=( + "base temporary directory for this test run." + "(warning: this directory is removed if it exists)" + ), + ) + + +def pytest_configure(config): + __import__("pytest").config = config # compatibility + + +def wrap_session(config, doit): + """Skeleton command line program""" + session = Session(config) + session.exitstatus = EXIT_OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + raise + except Failed: + session.exitstatus = EXIT_TESTSFAILED + except KeyboardInterrupt: + excinfo = _pytest._code.ExceptionInfo() + exitstatus = EXIT_INTERRUPTED + if initstate <= 2 and isinstance(excinfo.value, exit.Exception): + sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except: # noqa + excinfo = _pytest._code.ExceptionInfo() + config.notify_exception(excinfo, config.option) + session.exitstatus = EXIT_INTERNALERROR + if excinfo.errisinstance(SystemExit): + sys.stderr.write("mainloop: caught Spurious SystemExit!\n") + + finally: + excinfo = None # Explicitly break reference cycle. + session.startdir.chdir() + if initstate >= 2: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config): + return wrap_session(config, _main) + + +def _main(config, session): + """ default command line protocol for initialization, session, + running tests and reporting. 
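For example (illustrative): a session with failing tests makes this return EXIT_TESTSFAILED (1), a session that collects no tests returns EXIT_NOTESTSCOLLECTED (5), and otherwise None is returned so wrap_session keeps the default EXIT_OK (0).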
""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return EXIT_TESTSFAILED + elif session.testscollected == 0: + return EXIT_NOTESTSCOLLECTED + + +def pytest_collection(session): + return session.perform_collect() + + +def pytest_runtestloop(session): + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted("%d errors during collection" % session.testsfailed) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path): + """Attempts to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script""" + bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin") + if not bindir.isdir(): + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any([fname.basename in activates for fname in bindir.listdir()]) + + +def pytest_ignore_collect(path, config): + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend([py.path.local(x) for x in excludeopt]) + + if py.path.local(path) in ignore_paths: + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if _in_venv(path) and not allow_in_venv: + return True + + # Skip duplicate paths. + keepduplicates = config.getoption("keepduplicates") + duplicate_paths = config.pluginmanager._duplicatepaths + if not keepduplicates: + if path in duplicate_paths: + return True + else: + duplicate_paths.add(path) + + return False + + +def pytest_collection_modifyitems(items, config): + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@contextlib.contextmanager +def _patched_find_module(): + """Patch bug in pkgutil.ImpImporter.find_module + + When using pkgutil.find_loader on python<3.4 it removes symlinks + from the path due to a call to os.path.realpath. This is not consistent + with actually doing the import (in these versions, pkgutil and __import__ + did not share the same underlying code). This can break conftest + discovery for pytest where symlinks are involved. + + The only supported python<3.4 by pytest is python 2.7. 
+ """ + if six.PY2: # python 3.4+ uses importlib instead + + def find_module_patched(self, fullname, path=None): + # Note: we ignore 'path' argument since it is only used via meta_path + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + # original: path = [os.path.realpath(self.path)] + path = [self.path] + try: + file, filename, etc = pkgutil.imp.find_module(subname, path) + except ImportError: + return None + return pkgutil.ImpLoader(fullname, file, filename, etc) + + old_find_module = pkgutil.ImpImporter.find_module + pkgutil.ImpImporter.find_module = find_module_patched + try: + yield + finally: + pkgutil.ImpImporter.find_module = old_find_module + else: + yield + + +class FSHookProxy(object): + def __init__(self, fspath, pm, remove_mods): + self.fspath = fspath + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class NoMatch(Exception): + """ raised if matching cannot locate a matching names. """ + + +class Interrupted(KeyboardInterrupt): + """ signals an interrupted test run. """ + + __module__ = "builtins" # for py3 + + +class Failed(Exception): + """ signals a stop as failed test run. """ + + +class Session(nodes.FSCollector): + Interrupted = Interrupted + Failed = Failed + + def __init__(self, config): + nodes.FSCollector.__init__( + self, config.rootdir, parent=None, config=config, session=self, nodeid="" + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop = False + self.shouldfail = False + self.trace = config.trace.root.get("collection") + self._norecursepatterns = config.getini("norecursedirs") + self.startdir = py.path.local() + self._initialpaths = frozenset() + # Keep track of any collected nodes in here, so we don't duplicate fixtures + self._node_cache = {} + + self.config.pluginmanager.register(self, name="session") + + @hookimpl(tryfirst=True) + def pytest_collectstart(self): + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report): + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path): + return path in self._initialpaths + + def gethookproxy(self, fspath): + # check if we have the common case of running + # hooks with all conftest.py files + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy + + def perform_collect(self, args=None, genitems=True): + hook = self.config.hook + try: + items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + self.testscollected = len(items) + return items + + def _perform_collect(self, args, 
genitems): + if args is None: + args = self.config.args + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + self._notfound = [] + initialpaths = [] + self._initialparts = [] + self.items = items = [] + for arg in args: + parts = self._parsearg(arg) + self._initialparts.append(parts) + initialpaths.append(parts[0]) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, exc in self._notfound: + line = "(no name %r in any of %r)" % (arg, exc.args[0]) + errors.append("not found: %s\n%s" % (arg, line)) + # XXX: test this + raise UsageError(*errors) + if not genitems: + return rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + return items + + def collect(self): + for parts in self._initialparts: + arg = "::".join(map(str, parts)) + self.trace("processing argument", arg) + self.trace.root.indent += 1 + try: + for x in self._collect(arg): + yield x + except NoMatch: + # we are inside a make_report hook so + # we cannot directly pass through the exception + self._notfound.append((arg, sys.exc_info()[1])) + + self.trace.root.indent -= 1 + + def _collect(self, arg): + from _pytest.python import Package + + names = self._parsearg(arg) + argpath = names.pop(0).realpath() + paths = [] + + root = self + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. + # No point in finding packages when collecting doctests + if not self.config.option.doctestmodules: + for parent in argpath.parts(): + pm = self.config.pluginmanager + if pm._confcutdir and pm._confcutdir.relto(parent): + continue + + if parent.isdir(): + pkginit = parent.join("__init__.py") + if pkginit.isfile(): + if pkginit in self._node_cache: + root = self._node_cache[pkginit][0] + else: + col = root._collectfile(pkginit) + if col: + if isinstance(col[0], Package): + root = col[0] + # always store a list in the cache, matchnodes expects it + self._node_cache[root.fspath] = [root] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. 
+ if argpath.check(dir=1): + assert not names, "invalid arg %r" % (arg,) + for path in argpath.visit( + fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True + ): + pkginit = path.dirpath().join("__init__.py") + if pkginit.exists() and not any(x in pkginit.parts() for x in paths): + for x in root._collectfile(pkginit): + yield x + paths.append(x.fspath.dirpath()) + + if not any(x in path.parts() for x in paths): + for x in root._collectfile(path): + if (type(x), x.fspath) in self._node_cache: + yield self._node_cache[(type(x), x.fspath)] + else: + self._node_cache[(type(x), x.fspath)] = x + yield x + else: + assert argpath.check(file=1) + + if argpath in self._node_cache: + col = self._node_cache[argpath] + else: + col = root._collectfile(argpath) + if col: + self._node_cache[argpath] = col + for y in self.matchnodes(col, names): + yield y + + def _collectfile(self, path): + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + return ihook.pytest_collect_file(path=path, parent=self) + + def _recurse(self, path): + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return + for pat in self._norecursepatterns: + if path.check(fnmatch=pat): + return False + ihook = self.gethookproxy(path) + ihook.pytest_collect_directory(path=path, parent=self) + return True + + def _tryconvertpyarg(self, x): + """Convert a dotted module name to path.""" + try: + with _patched_find_module(): + loader = pkgutil.find_loader(x) + except ImportError: + return x + if loader is None: + return x + # This method is sometimes invoked when AssertionRewritingHook, which + # does not define a get_filename method, is already in place: + try: + with _patched_find_module(): + path = loader.get_filename(x) + except AttributeError: + # Retrieve path from AssertionRewritingHook: + path = loader.modules[x][0].co_filename + if loader.is_package(x): + path = os.path.dirname(path) + return path + + def _parsearg(self, arg): + """ return (fspath, names) tuple after checking the file exists. 
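For example (illustrative): the argument ``tests/test_foo.py::TestBar::test_baz`` is split on ``::``, its first part is resolved against the invocation directory into a py.path.local, and ``[<resolved path>, "TestBar", "test_baz"]`` is returned; a UsageError is raised if that file does not exist.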
""" + parts = str(arg).split("::") + if self.config.option.pyargs: + parts[0] = self._tryconvertpyarg(parts[0]) + relpath = parts[0].replace("/", os.sep) + path = self.config.invocation_dir.join(relpath, abs=True) + if not path.check(): + if self.config.option.pyargs: + raise UsageError( + "file or package not found: " + arg + " (missing __init__.py?)" + ) + raise UsageError("file not found: " + arg) + parts[0] = path + return parts + + def matchnodes(self, matching, names): + self.trace("matchnodes", matching, names) + self.trace.root.indent += 1 + nodes = self._matchnodes(matching, names) + num = len(nodes) + self.trace("matchnodes finished -> ", num, "nodes") + self.trace.root.indent -= 1 + if num == 0: + raise NoMatch(matching, names[:1]) + return nodes + + def _matchnodes(self, matching, names): + if not matching or not names: + return matching + name = names[0] + assert name + nextnames = names[1:] + resultnodes = [] + for node in matching: + if isinstance(node, nodes.Item): + if not names: + resultnodes.append(node) + continue + assert isinstance(node, nodes.Collector) + key = (type(node), node.nodeid) + if key in self._node_cache: + rep = self._node_cache[key] + else: + rep = collect_one_node(node) + self._node_cache[key] = rep + if rep.passed: + has_matched = False + for x in rep.result: + # TODO: remove parametrized workaround once collection structure contains parametrization + if x.name == name or x.name.split("[")[0] == name: + resultnodes.extend(self.matchnodes([x], nextnames)) + has_matched = True + # XXX accept IDs that don't have "()" for class instances + if not has_matched and len(rep.result) == 1 and x.name == "()": + nextnames.insert(0, name) + resultnodes.extend(self.matchnodes([x], nextnames)) + else: + # report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134) + node.ihook.pytest_collectreport(report=rep) + return resultnodes + + def genitems(self, node): + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + rep = collect_one_node(node) + if rep.passed: + for subnode in rep.result: + for x in self.genitems(subnode): + yield x + node.ihook.pytest_collectreport(report=rep) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0038f06e76ed3ecfa1177f280a0a486450d7f456 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/main.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6495dd0345ab0361141bef35f0453d3f3e6b89e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,171 @@ +""" generic mechanism for marking and selecting python functions. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from .legacy import matchkeyword +from .legacy import matchmark +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import MarkInfo +from .structures import ParameterSet +from .structures import transfer_markers +from _pytest.config import UsageError + +__all__ = [ + "Mark", + "MarkInfo", + "MarkDecorator", + "MarkGenerator", + "transfer_markers", + "get_empty_parameterset_mark", +] + + +def param(*values, **kw): + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: variable args of the values of the parameter set, in order. + :keyword marks: a single mark or a list of marks to be applied to this parameter set. + :keyword str id: the id to attribute to this parameter set. + """ + return ParameterSet.param(*values, **kw) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="only run tests matching given mark expression. 
" + "example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") + + +def pytest_cmdline_main(config): + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + +pytest_cmdline_main.tryfirst = True + + +def deselect_by_keyword(items, config): + keywordexpr = config.option.keyword.lstrip() + if keywordexpr.startswith("-"): + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and not matchkeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + if selectuntil: + keywordexpr = None + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def deselect_by_mark(items, config): + matchexpr = config.option.markexpr + if not matchexpr: + return + + remaining = [] + deselected = [] + for item in items: + if matchmark(item, matchexpr): + remaining.append(item) + else: + deselected.append(item) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def pytest_collection_modifyitems(items, config): + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config): + config._old_mark_config = MARK_GEN._config + if config.option.strict: + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + "{!s} must be one of skip, xfail or fail_at_collect" + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + ) + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d411170f1f1e34682ea62238351d1c94b4895167 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..793bff79e83657fcb1e881bf05418a4dfd44a581 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.py @@ -0,0 +1,125 @@ +import os +import platform +import sys +import traceback + +import six + +from ..outcomes import fail +from ..outcomes import TEST_OUTCOME + + +def cached_eval(config, expr, d): + if not hasattr(config, "_evalcache"): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + import _pytest._code + + exprcode = 
_pytest._code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +class MarkEvaluator(object): + def __init__(self, item, name): + self.item = item + self._marks = None + self._mark = None + self._mark_name = name + + def __bool__(self): + # dont cache here to prevent staleness + return bool(self._get_marks()) + + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, "exc") + + def _get_marks(self): + return list(self.item.iter_markers(name=self._mark_name)) + + def invalidraise(self, exc): + raises = self.get("raises") + if not raises: + return + return not isinstance(exc, raises) + + def istrue(self): + try: + return self._istrue() + except TEST_OUTCOME: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^"] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + fail( + "Error evaluating %r expression\n" + " %s\n" + "%s" % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False, + ) + + def _getglobals(self): + d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} + if hasattr(self.item, "obj"): + d.update(self.item.obj.__globals__) + return d + + def _istrue(self): + if hasattr(self, "result"): + return self.result + self._marks = self._get_marks() + + if self._marks: + self.result = False + for mark in self._marks: + self._mark = mark + if "condition" in mark.kwargs: + args = (mark.kwargs["condition"],) + else: + args = mark.args + + for expr in args: + self.expr = expr + if isinstance(expr, six.string_types): + d = self._getglobals() + result = cached_eval(self.item.config, expr, d) + else: + if "reason" not in mark.kwargs: + # XXX better be checked at collection time + msg = ( + "you need to specify reason=STRING " + "when using booleans as conditions." 
+ ) + fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = mark.kwargs.get("reason", None) + self.expr = expr + return self.result + + if not args: + self.result = True + self.reason = mark.kwargs.get("reason", None) + return self.result + return False + + def get(self, attr, default=None): + if self._mark is None: + return default + return self._mark.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, "reason", None) or self.get("reason", None) + if not expl: + if not hasattr(self, "expr"): + return "" + else: + return "condition: " + str(self.expr) + return expl diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d522a66a0eddd428679015d4b49922cc4fcd5c5e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/evaluate.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..cea136bff744b4f20a471399af304970b13ebfe8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.py @@ -0,0 +1,101 @@ +""" +this is a place where we put datastructures used by legacy apis +we hope ot remove +""" +import keyword + +import attr + +from _pytest.config import UsageError + + +@attr.s +class MarkMapping(object): + """Provides a local mapping for markers where item access + resolves to True if the marker is present. """ + + own_mark_names = attr.ib() + + @classmethod + def from_item(cls, item): + mark_names = {mark.name for mark in item.iter_markers()} + return cls(mark_names) + + def __getitem__(self, name): + return name in self.own_mark_names + + +class KeywordMapping(object): + """Provides a local mapping for keywords. + Given a list of names, map any substring of one of these names to True. + """ + + def __init__(self, names): + self._names = names + + @classmethod + def from_item(cls, item): + mapped_names = set() + + # Add the names of the current item and any parent items + import pytest + + for item in item.listchain(): + if not isinstance(item, pytest.Instance): + mapped_names.add(item.name) + + # Add the names added as extra keywords to current or parent items + for name in item.listextrakeywords(): + mapped_names.add(name) + + # Add the names attached to the current function through direct assignment + if hasattr(item, "function"): + for name in item.function.__dict__: + mapped_names.add(name) + + return cls(mapped_names) + + def __getitem__(self, subname): + for name in self._names: + if subname in name: + return True + return False + + +python_keywords_allowed_list = ["or", "and", "not"] + + +def matchmark(colitem, markexpr): + """Tries to match on any marker names, attached to the given colitem.""" + try: + return eval(markexpr, {}, MarkMapping.from_item(colitem)) + except SyntaxError as e: + raise SyntaxError(str(e) + "\nMarker expression must be valid Python!") + + +def matchkeyword(colitem, keywordexpr): + """Tries to match given keyword expression to given collector item. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. 
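# --- Editorial sketch, not part of the vendored diff ---
# What matchmark()/matchkeyword() above boil down to: the "-m"/"-k"
# expression is handed to eval() with a mapping object as locals, so every
# name in the expression becomes a membership or substring test. Simplified
# stand-in for KeywordMapping; class and variable names are illustrative.
class SubstringMapping(object):
    def __init__(self, names):
        self._names = names

    def __getitem__(self, subname):
        return any(subname in name for name in self._names)

collected = ["TestMyClass", "test_method_simple", "test_other_case"]
print(eval("test_method or test_other", {}, SubstringMapping(collected)))  # True
print(eval("not test_method", {}, SubstringMapping(collected)))            # False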
+ Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + mapping = KeywordMapping.from_item(colitem) + if " " not in keywordexpr: + # special case to allow for simple "-k pass" and "-k 1.3" + return mapping[keywordexpr] + elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: + return not mapping[keywordexpr[4:]] + for kwd in keywordexpr.split(): + if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: + raise UsageError( + "Python keyword '{}' not accepted in expressions passed to '-k'".format( + kwd + ) + ) + try: + return eval(keywordexpr, {}, mapping) + except SyntaxError: + raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d8ba8630b899e955619c824be4420c23ed5d1ca Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/legacy.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca1d830a7fa554d724f3d6eaefcc4fc9816e16b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.py @@ -0,0 +1,471 @@ +import inspect +import warnings +from collections import namedtuple +from functools import reduce +from operator import attrgetter + +import attr +from six.moves import map + +from ..compat import getfslineno +from ..compat import MappingMixin +from ..compat import NOTSET +from ..deprecated import MARK_INFO_ATTRIBUTE +from ..deprecated import MARK_PARAMETERSET_UNPACKING +from _pytest.outcomes import fail + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def alias(name, warning=None): + getter = attrgetter(name) + + def warned(self): + warnings.warn(warning, stacklevel=2) + return getter(self) + + return property(getter if warning is None else warned, doc="alias for " + name) + + +def istestfunc(func): + return ( + hasattr(func, "__call__") + and getattr(func, "__name__", "") != "" + ) + + +def get_empty_parameterset_mark(config, argnames, func): + from ..nodes import Collector + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno) + ) + else: + raise LookupError(requested_mark) + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + return mark(reason=reason) + + +class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop("marks", ()) + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, (tuple, list, set)) + + def param_extract_id(id=None): + return id + + id_ = param_extract_id(**kw) + return cls(values, marks, id_) + + @classmethod + def extract_from(cls, parameterset, 
belonging_definition, legacy_force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param legacy_force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + + :param belonging_definition: the item that we will be extracting the parameters from. + """ + + if isinstance(parameterset, cls): + return parameterset + if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + return cls.param(parameterset) + + newmarks = [] + argval = parameterset + while isinstance(argval, MarkDecorator): + newmarks.append( + MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs)) + ) + argval = argval.args[-1] + assert not isinstance(argval, ParameterSet) + if legacy_force_tuple: + argval = (argval,) + + if newmarks and belonging_definition is not None: + belonging_definition.warn(MARK_PARAMETERSET_UNPACKING) + + return cls(argval, marks=newmarks, id=None) + + @classmethod + def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from( + x, + legacy_force_tuple=force_tuple, + belonging_definition=function_definition, + ) + for x in argvalues + ] + del argvalues + + if parameters: + # check all parameter sets have the correct number of values + for param in parameters: + if len(param.values) != len(argnames): + raise ValueError( + 'In "parametrize" the number of values ({}) must be ' + "equal to the number of names ({})".format( + param.values, argnames + ) + ) + else: + # empty parameter set (likely computed at runtime): create a single + # parameter set with NOSET values, with the "empty parameter set" mark applied to it + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@attr.s(frozen=True) +class Mark(object): + #: name of the mark + name = attr.ib(type=str) + #: positional arguments of the mark decorator + args = attr.ib() # type: List[object] + #: keyword arguments of the mark decorator + kwargs = attr.ib() # type: Dict[str, object] + + def combined_with(self, other): + """ + :param other: the mark to combine with + :type other: Mark + :rtype: Mark + + combines by appending aargs and merging the mappings + """ + assert self.name == other.name + return Mark( + self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) + ) + + +@attr.s +class MarkDecorator(object): + """ A decorator for test functions and test classes. When applied + it will create :class:`MarkInfo` objects which may be + :ref:`retrieved by hooks as item keywords `. + MarkDecorator instances are often created like this:: + + mark1 = pytest.mark.NAME # simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a MarkDecorator instance is called it does the following: + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches itself to the class so it + gets applied automatically to all test cases found in that class. + 2. 
If called with a single function as its only positional argument and + no additional keyword arguments, it attaches a MarkInfo object to the + function, containing all the arguments already stored internally in + the MarkDecorator. + 3. When called in any other case, it performs a 'fake construction' call, + i.e. it returns a new MarkDecorator instance with the original + MarkDecorator's content updated with the arguments passed to this + call. + + Note: The rules above prevent MarkDecorator objects from storing only a + single function or class reference as their positional argument with no + additional keyword or positional arguments. + + """ + + mark = attr.ib(validator=attr.validators.instance_of(Mark)) + + name = alias("mark.name") + args = alias("mark.args") + kwargs = alias("mark.kwargs") + + @property + def markname(self): + return self.name # for backward-compat (2.4.1 had this attr) + + def __eq__(self, other): + return self.mark == other.mark if isinstance(other, MarkDecorator) else False + + def __repr__(self): + return "" % (self.mark,) + + def with_args(self, *args, **kwargs): + """ return a MarkDecorator with extra arguments added + + unlike call this can be used even if the sole argument is a callable/class + + :return: MarkDecorator + """ + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) + + def __call__(self, *args, **kwargs): + """ if passed a single callable argument: decorate it with mark info. + otherwise add *args/**kwargs in-place to mark information. """ + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + if is_class: + store_mark(func, self.mark) + else: + store_legacy_markinfo(func, self.mark) + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks(obj): + """ + obtain the unpacked marks that are stored on an object + """ + mark_list = getattr(obj, "pytestmark", []) + if not isinstance(mark_list, list): + mark_list = [mark_list] + return normalize_mark_list(mark_list) + + +def normalize_mark_list(mark_list): + """ + normalizes marker decorating helpers to mark objects + + :type mark_list: List[Union[Mark, Markdecorator]] + :rtype: List[Mark] + """ + return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator + + +def store_mark(obj, mark): + """store a Mark on an object + this is used to implement the Mark declarations/decorators correctly + """ + assert isinstance(mark, Mark), mark + # always reassign name to avoid updating pytestmark + # in a reference that was only borrowed + obj.pytestmark = get_unpacked_marks(obj) + [mark] + + +def store_legacy_markinfo(func, mark): + """create the legacy MarkInfo objects and put them onto the function + """ + if not isinstance(mark, Mark): + raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) + holder = getattr(func, mark.name, None) + if holder is None: + holder = MarkInfo.for_mark(mark) + setattr(func, mark.name, holder) + elif isinstance(holder, MarkInfo): + holder.add_mark(mark) + + +def transfer_markers(funcobj, cls, mod): + """ + this function transfers class level markers and module level markers + into function level markinfo objects + + this is the main reason why marks are so broken + the resolution will involve phasing out function level MarkInfo objects + + """ + for obj in (cls, mod): + for mark in get_unpacked_marks(obj): + if not _marked(funcobj, mark): + store_legacy_markinfo(funcobj, 
mark) + + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. + """ + try: + func_mark = getattr(func, getattr(mark, "combined", mark).name) + except AttributeError: + return False + return any(mark == info.combined for info in func_mark) + + +@attr.s(repr=False) +class MarkInfo(object): + """ Marking object created by :class:`MarkDecorator` instances. """ + + _marks = attr.ib(converter=list) + + @_marks.validator + def validate_marks(self, attribute, value): + for item in value: + if not isinstance(item, Mark): + raise ValueError( + "MarkInfo expects Mark instances, got {!r} ({!r})".format( + item, type(item) + ) + ) + + combined = attr.ib( + repr=False, + default=attr.Factory( + lambda self: reduce(Mark.combined_with, self._marks), takes_self=True + ), + ) + + name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE) + args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE) + kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE) + + @classmethod + def for_mark(cls, mark): + return cls([mark]) + + def __repr__(self): + return "".format(self.combined) + + def add_mark(self, mark): + """ add a MarkInfo with the given args and kwargs. """ + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) + + def __iter__(self): + """ yield MarkInfo objects each relating to a marking-call. """ + return map(MarkInfo.for_mark, self._marks) + + +class MarkGenerator(object): + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. Example:: + + import pytest + @pytest.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. 
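# --- Editorial sketch, not part of the vendored diff ---
# A stripped-down picture of what "@pytest.mark.slowtest" does under the
# MarkGenerator/MarkDecorator machinery above: attribute access builds a
# small mark record, and decorating a function appends that record to its
# "pytestmark" list (see store_mark()). Simplified stand-ins, not the real
# classes.
import collections

SimpleMark = collections.namedtuple("SimpleMark", "name args kwargs")

class SimpleMarkGenerator(object):
    def __getattr__(self, name):
        def decorator(func):
            func.pytestmark = getattr(func, "pytestmark", []) + [SimpleMark(name, (), {})]
            return func
        return decorator

simple_mark = SimpleMarkGenerator()

@simple_mark.slowtest
def test_function():
    pass

# test_function.pytestmark -> [SimpleMark(name='slowtest', args=(), kwargs={})]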
""" + + _config = None + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + if self._config is not None: + self._check(name) + return MarkDecorator(Mark(name, (), {})) + + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = values = set() + for line in self._config.getini("markers"): + marker = line.split(":", 1)[0] + marker = marker.rstrip() + x = marker.split("(", 1)[0] + values.add(x) + if name not in self._markers: + fail("{!r} not a registered marker".format(name), pytrace=False) + + +MARK_GEN = MarkGenerator() + + +class NodeKeywords(MappingMixin): + def __init__(self, node): + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key): + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key, value): + self._markers[key] = value + + def __delitem__(self, key): + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self): + seen = self._seen() + return iter(seen) + + def _seen(self): + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return seen + + def __len__(self): + return len(self._seen()) + + def __repr__(self): + return "" % (self.node,) + + +@attr.s(cmp=False, hash=False) +class NodeMarkers(object): + """ + internal strucutre for storing marks belongong to a node + + ..warning:: + + unstable api + + """ + + own_markers = attr.ib(default=attr.Factory(list)) + + def update(self, add_markers): + """update the own markers + """ + self.own_markers.extend(add_markers) + + def find(self, name): + """ + find markers in own nodes or parent nodes + needs a better place + """ + for mark in self.own_markers: + if mark.name == name: + yield mark + + def __iter__(self): + return iter(self.own_markers) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f892c77024079bb6c5cadb4142b158766a9859e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/mark/structures.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000000000000000000000000000000000000..2efdb73ae5ac66d45a6e1c2333ba58641966640c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,306 @@ +""" monkeypatching and mocking functionality. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import sys +import warnings +from contextlib import contextmanager + +import six + +import pytest +from _pytest.fixtures import fixture + +RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") + + +@fixture +def monkeypatch(): + """The returned ``monkeypatch`` fixture provides these + helper methods to modify objects, dictionaries or os.environ:: + + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) + + All modifications will be undone after the requesting + test function or fixture has finished. The ``raising`` + parameter determines if a KeyError or AttributeError + will be raised if the set/deletion operation has no target. + """ + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() + + +def resolve(name): + # simplified from zope.dottedname + parts = name.split(".") + + used = parts.pop(0) + found = __import__(used) + for part in parts: + used += "." + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # we use explicit un-nesting of the handling block in order + # to avoid nested exceptions on python 3 + try: + __import__(used) + except ImportError as ex: + # str is used for py2 vs py3 + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError("import error in %s: %s" % (used, ex)) + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj, name, ann): + try: + obj = getattr(obj, name) + except AttributeError: + raise AttributeError( + "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name) + ) + return obj + + +def derive_importpath(import_path, raising): + if not isinstance(import_path, six.string_types) or "." not in import_path: + raise TypeError("must be absolute import path string, not %r" % (import_path,)) + module, attr = import_path.rsplit(".", 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset(object): + def __repr__(self): + return "" + + +notset = Notset() + + +class MonkeyPatch(object): + """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. + """ + + def __init__(self): + self._setattr = [] + self._setitem = [] + self._cwd = None + self._savesyspath = None + + @contextmanager + def context(self): + """ + Context manager that returns a new :class:`MonkeyPatch` object which + undoes any patching done inside the ``with`` block upon exit: + + .. code-block:: python + + import functools + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see `#3290 `_. + """ + m = MonkeyPatch() + try: + yield m + finally: + m.undo() + + def setattr(self, target, name, value=notset, raising=True): + """ Set attribute value on target, memorizing the old value. + By default raise AttributeError if the attribute did not exist. 
+ + For convenience you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name. Example: + ``monkeypatch.setattr("os.getcwd", lambda: "/")`` + would set the ``getcwd`` function of the ``os`` module. + + The ``raising`` value determines if the setattr should fail + if the attribute is not already present (defaults to True + which means it will raise). + """ + __tracebackhide__ = True + import inspect + + if value is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError("%r has no attribute %r" % (target, name)) + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr(self, target, name=notset, raising=True): + """ Delete attribute ``name`` from ``target``, by default raise + AttributeError it the attribute did not previously exist. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + If ``raising`` is set to False, no exception will be raised if the + attribute is missing. + """ + __tracebackhide__ = True + if name is notset: + if not isinstance(target, six.string_types): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + self._setattr.append((target, name, getattr(target, name, notset))) + delattr(target, name) + + def setitem(self, dic, name, value): + """ Set dictionary entry ``name`` to value. """ + self._setitem.append((dic, name, dic.get(name, notset))) + dic[name] = value + + def delitem(self, dic, name, raising=True): + """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. + + If ``raising`` is set to False, no exception will be raised if the + key is missing. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + del dic[name] + + def _warn_if_env_name_is_not_str(self, name): + """On Python 2, warn if the given environment variable name is not a native str (#4056)""" + if six.PY2 and not isinstance(name, str): + warnings.warn( + pytest.PytestWarning( + "Environment variable name {!r} should be str".format(name) + ) + ) + + def setenv(self, name, value, prepend=None): + """ Set environment variable ``name`` to ``value``. If ``prepend`` + is a character, read the current environment variable value + and prepend the ``value`` adjoined with the ``prepend`` character.""" + if not isinstance(value, str): + warnings.warn( + pytest.PytestWarning( + "Environment variable value {!r} should be str, converted to str implicitly".format( + value + ) + ) + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self._warn_if_env_name_is_not_str(name) + self.setitem(os.environ, name, value) + + def delenv(self, name, raising=True): + """ Delete ``name`` from the environment. 
Raise KeyError if it does + not exist. + + If ``raising`` is set to False, no exception will be raised if the + environment variable is missing. + """ + self._warn_if_env_name_is_not_str(name) + self.delitem(os.environ, name, raising=raising) + + def syspath_prepend(self, path): + """ Prepend ``path`` to ``sys.path`` list of import locations. """ + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + def chdir(self, path): + """ Change the current working directory to the specified path. + Path can be a string or a py.path.local object. + """ + if self._cwd is None: + self._cwd = os.getcwd() + if hasattr(path, "chdir"): + path.chdir() + else: + os.chdir(path) + + def undo(self): + """ Undo previous changes. This call consumes the + undo stack. Calling it a second time has no effect unless + you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + Note that the same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, name, value in reversed(self._setitem): + if value is notset: + try: + del dictionary[name] + except KeyError: + pass # was already deleted, so we have the desired state + else: + dictionary[name] = value + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c45f61f2fa79ea70ef1195ff9f62c84d2a769b5f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/monkeypatch.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..76d8d6a8aebc68dbe0614cd54880c2b34fab6a72 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.py @@ -0,0 +1,535 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import warnings + +import attr +import py +import six + +import _pytest._code +from _pytest.compat import getfslineno +from _pytest.mark.structures import MarkInfo +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail + +SEP = "/" + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + + +def _splitnode(nodeid): + """Split a nodeid into constituent 'parts'. + + Node IDs are strings, and can be things like: + '' + 'testing/code' + 'testing/code/test_excinfo.py' + 'testing/code/test_excinfo.py::TestFormattedExcinfo::()' + + Return values are lists e.g. 
+ [] + ['testing', 'code'] + ['testing', 'code', 'test_excinfo.py'] + ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] + """ + if nodeid == "": + # If there is no root node at all, return an empty list so the caller's logic can remain sane + return [] + parts = nodeid.split(SEP) + # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()' + parts[-1:] = parts[-1].split("::") + return parts + + +def ischildnode(baseid, nodeid): + """Return True if the nodeid is a child node of the baseid. + + E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' + """ + base_parts = _splitnode(baseid) + node_parts = _splitnode(nodeid) + if len(node_parts) < len(base_parts): + return False + return node_parts[: len(base_parts)] == base_parts + + +@attr.s +class _CompatProperty(object): + name = attr.ib() + + def __get__(self, obj, owner): + if obj is None: + return self + + from _pytest.deprecated import COMPAT_PROPERTY + + warnings.warn( + COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2 + ) + return getattr(__import__("pytest"), self.name) + + +class Node(object): + """ base class for Collector and Item the test collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__( + self, name, parent=None, config=None, session=None, fspath=None, nodeid=None + ): + #: a unique name within the scope of the parent node + self.name = name + + #: the parent collector node. + self.parent = parent + + #: the pytest config object + self.config = config or parent.config + + #: the session this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from (can be None) + self.fspath = fspath or getattr(parent, "fspath", None) + + #: keywords/markers collected from all scopes + self.keywords = NodeKeywords(self) + + #: the marker objects belonging to this node + self.own_markers = [] + + #: allow adding of extra keywords to use for matching + self.extra_keyword_matches = set() + + # used for storing artificial fixturedefs for direct parametrization + self._name2pseudofixturedef = {} + + if nodeid is not None: + self._nodeid = nodeid + else: + assert parent is not None + self._nodeid = self.parent.nodeid + "::" + self.name + + @property + def ihook(self): + """ fspath sensitive hook proxy used to call pytest hooks""" + return self.session.gethookproxy(self.fspath) + + Module = _CompatProperty("Module") + Class = _CompatProperty("Class") + Instance = _CompatProperty("Instance") + Function = _CompatProperty("Function") + File = _CompatProperty("File") + Item = _CompatProperty("Item") + + def _getcustomclass(self, name): + maybe_compatprop = getattr(type(self), name) + if isinstance(maybe_compatprop, _CompatProperty): + return getattr(__import__("pytest"), name) + else: + from _pytest.deprecated import CUSTOM_CLASS + + cls = getattr(self, name) + self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__)) + return cls + + def __repr__(self): + return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, _code_or_warning=None, message=None, code=None): + """Issue a warning for this item. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + This can be called in two forms: + + **Warning instance** + + This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings. + + .. 
code-block:: python + + node.warn(PytestWarning("some message")) + + The warning instance must be a subclass of :class:`pytest.PytestWarning`. + + **code/message (deprecated)** + + This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another + warning about the deprecation: + + .. code-block:: python + + node.warn("CI", "some message") + + :param Union[Warning,str] _code_or_warning: + warning instance or warning code (legacy). This parameter receives an underscore for backward + compatibility with the legacy code/message form, and will be replaced for something + more usual when the legacy form is removed. + + :param Union[str,None] message: message to display when called in the legacy form. + :param str code: code for the warning, in legacy form when using keyword arguments. + :return: + """ + if message is None: + if _code_or_warning is None: + raise ValueError("code_or_warning must be given") + self._std_warn(_code_or_warning) + else: + if _code_or_warning and code: + raise ValueError( + "code_or_warning and code cannot both be passed to this function" + ) + code = _code_or_warning or code + self._legacy_warn(code, message) + + def _legacy_warn(self, code, message): + """ + .. deprecated:: 3.8 + + Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead. + + Generate a warning with the given code and message for this item. + """ + from _pytest.deprecated import NODE_WARN + + self._std_warn(NODE_WARN) + + assert isinstance(code, str) + fslocation = get_fslocation_from_item(self) + self.ihook.pytest_logwarning.call_historic( + kwargs=dict( + code=code, message=message, nodeid=self.nodeid, fslocation=fslocation + ) + ) + + def _std_warn(self, warning): + """Issue a warning for this item. + + Warnings will be displayed after the test session, unless explicitly suppressed + + :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. + + :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. + """ + from _pytest.warning_types import PytestWarning + + if not isinstance(warning, PytestWarning): + raise ValueError( + "warning must be an instance of PytestWarning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1 if lineno is not None else None, + ) + + # methods for ordering nodes + @property + def nodeid(self): + """ a ::-separated string denoting its collection tree address. """ + return self._nodeid + + def __hash__(self): + return hash(self.nodeid) + + def setup(self): + pass + + def teardown(self): + pass + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + chain = [] + item = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker, append=True): + """dynamically add a marker object to the node. + + :type marker: ``str`` or ``pytest.mark.*`` object + :param marker: + ``append=True`` whether to append the marker, + if ``False`` insert at position ``0``. 
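# --- Editorial sketch, not part of the vendored diff ---
# add_marker() above as it is commonly used from a conftest.py hook to mark
# collected items dynamically; the "slow"/"fast" marker names are
# illustrative.
import pytest

def pytest_collection_modifyitems(items):
    for item in items:
        if "slow" in item.nodeid:
            item.add_marker(pytest.mark.slow)       # append to existing marks
        else:
            item.add_marker("fast", append=False)   # string form, insert first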
+ """ + from _pytest.mark import MarkDecorator, MARK_GEN + + if isinstance(marker, six.string_types): + marker = getattr(MARK_GEN, marker) + elif not isinstance(marker, MarkDecorator): + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker.name] = marker + if append: + self.own_markers.append(marker.mark) + else: + self.own_markers.insert(0, marker.mark) + + def iter_markers(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node(self, name=None): + """ + :param name: if given, filter the results by the name attribute + + iterate over all markers of the node + returns sequence of tuples (node, mark) + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + def get_closest_marker(self, name, default=None): + """return the first marker matching the name, from closest (for example function) to farther level (for example + module level). + + :param default: fallback return value of no marker was found + :param name: name to filter by + """ + return next(self.iter_markers(name=name), default) + + def get_marker(self, name): + """ get a marker object from this node or None if + the node doesn't have a marker with that name. + + .. deprecated:: 3.6 + This function has been deprecated in favor of + :meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and + :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code` + for more details. + """ + markers = list(self.iter_markers(name=name)) + if markers: + return MarkInfo(markers) + + def listextrakeywords(self): + """ Return a set of all extra keywords in self and any parents.""" + extra_keywords = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self): + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin): + """ register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls): + """ get the next parent node (including ourself) + which is an instance of the given class""" + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + if excinfo.errisinstance(fail.Exception): + if not excinfo.value.pytrace: + return six.text_type(excinfo.value) + fm = self.session._fixturemanager + if excinfo.errisinstance(fm.FixtureLookupError): + return excinfo.value.formatrepr() + tbfilter = True + if self.config.option.fulltrace: + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + tbfilter = False # prunetraceback already does it + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? 
+ if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + + if self.config.option.verbose > 1: + truncate_locals = False + else: + truncate_locals = True + + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + ) + + repr_failure = _repr_failure_py + + +def get_fslocation_from_item(item): + """Tries to extract the actual location from an item, depending on available attributes: + + * "fslocation": a pair (path, lineno) + * "obj": a Python object that the item wraps. + * "fspath": just a path + + :rtype: a tuple of (str|LocalPath, int) with filename and line number. + """ + result = getattr(item, "location", None) + if result is not None: + return result[:2] + obj = getattr(item, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(item, "fspath", "unknown location"), -1 + + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + + class CollectError(Exception): + """ an error during collection, contains a custom message. """ + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + return self._repr_failure_py(excinfo, style="short") + + def _prunetraceback(self, excinfo): + if hasattr(self, "fspath"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session, fspath): + for initial_path in session._initialpaths: + if fspath.common(initial_path) == initial_path: + return fspath.relto(initial_path.dirname) + + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, SEP) + self.fspath = fspath + + session = session or parent.session + + if nodeid is None: + nodeid = self.fspath.relto(session.config.rootdir) + + if not nodeid: + nodeid = _check_initialpaths_for_relpath(session, fspath) + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super(FSCollector, self).__init__( + name, parent, config, session, nodeid=nodeid, fspath=fspath + ) + + +class File(FSCollector): + """ base class for collecting tests from a file. """ + + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. + """ + + nextitem = None + + def __init__(self, name, parent=None, config=None, session=None, nodeid=None): + super(Item, self).__init__(name, parent, config, session, nodeid=nodeid) + self._report_sections = [] + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties for this test. 
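# --- Editorial sketch, not part of the vendored diff ---
# Item.user_properties (initialised just below) holds (name, value) pairs for
# a test; from test code it is usually filled through the record_property
# fixture, and reporters such as the junitxml plugin read it back. The
# property name and value here are illustrative.
def test_build_metadata(record_property):
    record_property("build_revision", "abc123")   # appended to item.user_properties
    assert True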
+ self.user_properties = [] + + def add_report_section(self, when, key, content): + """ + Adds a new report section, similar to what's done internally to add stdout and + stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + # bestrelpath is a quite slow function + cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) + try: + fspath = cache[location[0]] + except KeyError: + fspath = self.session.fspath.bestrelpath(location[0]) + cache[location[0]] = fspath + location = (fspath, location[1], str(location[2])) + self._location = location + return location diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2d1819468116c8ecb85b874c7ccc370ad5ae7f9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nodes.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.py new file mode 100644 index 0000000000000000000000000000000000000000..4bfa9c5838b3e79d6f4500313c74b277eeeb04e6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.py @@ -0,0 +1,76 @@ +""" run test suites written for nose. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +from _pytest import python +from _pytest import runner +from _pytest import unittest +from _pytest.config import hookimpl + + +def get_skip_exceptions(): + skip_classes = set() + for module_name in ("unittest", "unittest2", "nose"): + mod = sys.modules.get(module_name) + if hasattr(mod, "SkipTest"): + skip_classes.add(mod.SkipTest) + return tuple(skip_classes) + + +def pytest_runtest_makereport(item, call): + if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): + # let's substitute the excinfo with a pytest.skip one + call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when) + call.excinfo = call2.excinfo + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item): + if is_potential_nosetest(item): + if isinstance(item.parent, python.Generator): + gen = item.parent + if not hasattr(gen, "_nosegensetup"): + call_optional(gen.obj, "setup") + if isinstance(gen.parent, python.Instance): + call_optional(gen.parent.obj, "setup") + gen._nosegensetup = True + if not call_optional(item.obj, "setup"): + # call module level setup if there is no object level one + call_optional(item.parent.obj, "setup") + # XXX this implies we only call teardown when setup worked + item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + + +def teardown_nose(item): + if is_potential_nosetest(item): + if not call_optional(item.obj, "teardown"): + call_optional(item.parent.obj, "teardown") + # if hasattr(item.parent, '_nosegensetup'): + # #call_optional(item._nosegensetup, 'teardown') + # del item.parent._nosegensetup + + +def pytest_make_collect_report(collector): + if isinstance(collector, python.Generator): + call_optional(collector.obj, "setup") + + +def is_potential_nosetest(item): + # extra check needed since we do not do nose style setup/teardown + # on direct unittest style classes + return isinstance(item, python.Function) and not isinstance( + item, unittest.TestCaseFunction + ) + + +def call_optional(obj, name): + method = getattr(obj, name, None) + isfixture = hasattr(method, "_pytestfixturefunction") + if method is not None and not isfixture and callable(method): + # If there's any problems allow the exception to raise rather than + # silently ignoring them + method() + return True diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba62fb1a8adbbc7bc10673f925c2c057b0d2e844 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/nose.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000000000000000000000000000000000000..cd08c0d48e4783fbf95bf8c2ce3810406e00ee89 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.py @@ -0,0 +1,182 @@ +""" +exception classes and constants handling test outcomes +as well as functions creating them +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + + +class OutcomeException(BaseException): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. 
+ """ + + def __init__(self, msg=None, pytrace=True): + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = val.decode("UTF-8", errors="replace") + return val + return "<%s instance>" % (self.__class__.__name__,) + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + + __module__ = "builtins" + + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + + def __init__(self, msg="unknown reason", returncode=None): + self.msg = msg + self.returncode = returncode + KeyboardInterrupt.__init__(self, msg) + + +# exposed helper methods + + +def exit(msg, returncode=None): + """ + Exit testing process as if KeyboardInterrupt was triggered. + + :param str msg: message to display upon exit. + :param int returncode: return code to be used when exiting pytest. + """ + __tracebackhide__ = True + raise Exit(msg, returncode) + + +exit.Exception = Exit + + +def skip(msg="", **kwargs): + """ + Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. + + :kwarg bool allow_module_level: allows this function to be called at + module level, skipping the rest of the module. Default to False. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. + """ + __tracebackhide__ = True + allow_module_level = kwargs.pop("allow_module_level", False) + if kwargs: + keys = [k for k in kwargs.keys()] + raise TypeError("unexpected keyword arguments: {}".format(keys)) + raise Skipped(msg=msg, allow_module_level=allow_module_level) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ + Explicitly fail an executing test with the given message. + + :param str msg: the message to show the user as reason for the failure. + :param bool pytrace: if false the msg represents the full failure information and no + python traceback will be reported. + """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +class XFailed(fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ + Imperatively xfail an executing test or setup functions with the given reason. + + This function should be called only during testing (setup, call or teardown). + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be + xfailed under certain conditions like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +def importorskip(modname, minversion=None): + """ return imported module if it has at least "minversion" as its + __version__ attribute. 
If no minversion is specified the a skip + is only triggered if the module can not be imported. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" % (modname,), allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped( + "we have a required version for %r but can not import " + "pkg_resources to parse version strings." % (modname,), + allow_module_level=True, + ) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.pyc new file mode 100644 index 0000000000000000000000000000000000000000..429bb26be3d5e6d1519255c40ad8cf37b1a19017 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/outcomes.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000000000000000000000000000000000000..9559e3265a26b36c93733509b4a5cff1c828f930 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.py @@ -0,0 +1,113 @@ +""" submit failure or test session information to a pastebin service. 
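The helpers defined in ``outcomes.py`` above (``exit``, ``skip``, ``fail``, ``xfail``, ``importorskip``) are the ones re-exported on the ``pytest`` namespace. A brief usage sketch; the optional ``yaml`` dependency is only an assumption for illustration::

    import sys
    import pytest

    # skip the whole module when the optional dependency is missing or too old
    yaml = pytest.importorskip("yaml", minversion="3.0")

    def test_needs_tty():
        if not sys.stdout.isatty():
            pytest.skip("requires an interactive terminal")

    def test_known_bug():
        pytest.xfail("tracked upstream, not fixed yet")

    def test_explicit_failure():
        pytest.fail("reached an unexpected state", pytrace=False)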
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import tempfile + +import six + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="send failed|all info to bpaste.net pastebin service.", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # if no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a slave node + # when using pytest-xdist, for example + if tr is not None: + # pastebin file will be utf-8 encoded binary file + config._pastebinfile = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, six.text_type): + s = s.encode("utf-8") + config._pastebinfile.write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config): + if hasattr(config, "_pastebinfile"): + # get terminal contents and delete file + config._pastebinfile.seek(0) + sessionlog = config._pastebinfile.read() + config._pastebinfile.close() + del config._pastebinfile + # undo our patching in the terminal reporter + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # write summary + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents): + """ + Creates a new paste using bpaste.net service. 
+ + :contents: paste contents as utf-8 encoded bytes + :returns: url to the pasted contents + """ + import re + + if sys.version_info < (3, 0): + from urllib import urlopen, urlencode + else: + from urllib.request import urlopen + from urllib.parse import urlencode + + params = { + "code": contents, + "lexer": "python3" if sys.version_info[0] == 3 else "python", + "expiry": "1week", + } + url = "https://bpaste.net" + response = urlopen(url, data=urlencode(params).encode("ascii")).read() + m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) + if m: + return "%s/show/%s" % (url, m.group(1)) + else: + return "bad response: " + response + + +def pytest_terminal_summary(terminalreporter): + import _pytest.config + + if terminalreporter.config.option.pastebin != "failed": + return + tr = terminalreporter + if "failed" in tr.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats.get("failed"): + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = tr._getfailureheadline(rep) + tw = _pytest.config.create_terminal_writer( + terminalreporter.config, stringio=True + ) + rep.toterminal(tw) + s = tw.stringio.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb13ccdabcbdce9dee3e1b0f60d64bcd13028016 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pastebin.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000000000000000000000000000000000000..7cf3f40b62dbc579e51fa4ec6f544dd2354bf782 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.py @@ -0,0 +1,313 @@ +import atexit +import errno +import fnmatch +import itertools +import operator +import os +import shutil +import stat +import sys +import uuid +from functools import reduce +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from posixpath import sep as posix_sep + +import six +from six.moves import map + +from .compat import PY36 + + +if PY36: + from pathlib import Path, PurePath +else: + from pathlib2 import Path, PurePath + +__all__ = ["Path", "PurePath"] + + +LOCK_TIMEOUT = 60 * 60 * 3 + +get_lock_path = operator.methodcaller("joinpath", ".lock") + + +def ensure_reset_dir(path): + """ + ensures the given path is a empty directory + """ + if path.exists(): + rmtree(path, force=True) + path.mkdir() + + +def _shutil_rmtree_remove_writable(func, fspath, _): + "Clear the readonly bit and reattempt the removal" + os.chmod(fspath, stat.S_IWRITE) + func(fspath) + + +def rmtree(path, force=False): + if force: + # ignore_errors leaves dead folders around + # python needs a rm -rf as a followup + # the trick with _shutil_rmtree_remove_writable is unreliable + shutil.rmtree(str(path), ignore_errors=True) + else: + shutil.rmtree(str(path)) + + +def find_prefixed(root, prefix): + """finds all elements in root that begin with the prefix, case insensitive""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x 
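``find_prefixed`` above yields directory entries whose names start with the given prefix, case-insensitively; together with the suffix helpers defined next, it is what lets ``make_numbered_dir`` pick the next free ``pytest-N`` directory. A small standalone sketch, importing the internal helper purely for illustration::

    from pathlib import Path                   # pathlib2 on Python 2, per the guard above
    from _pytest.pathlib import find_prefixed  # internal helper shown above

    root = Path("/tmp/pytest-of-user")         # hypothetical base temp directory
    for entry in find_prefixed(root, "pytest-"):
        # e.g. pytest-0, pytest-1, ... plus the "pytest-current" symlink
        print(entry.name)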
+ + +def extract_suffixes(iter, prefix): + """ + :param iter: iterator over path names + :param prefix: expected prefix of the path names + :returns: the parts of the paths following the prefix + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root, prefix): + """combines find_prefixes and extract_suffixes + """ + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num): + """parses number path suffixes, returns -1 on error""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +if six.PY2: + + def _max(iterable, default): + """needed due to python2.7 lacking the default argument for max""" + return reduce(max, iterable, default) + + +else: + _max = max + + +def _force_symlink(root, target, link_to): + """helper to create the current symlink + + its full of race conditions that are reasonably ok to ignore + for the contex of best effort linking to the latest testrun + + the presumption being thatin case of much parallelism + the inaccuracy is going to be acceptable + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root, prefix): + """create a directory with a increased number as suffix for the given prefix""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath("{}{}".format(prefix, new_number)) + try: + new_path.mkdir() + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise EnvironmentError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p): + """crates a lock to prevent premature folder cleanup""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except OSError as e: + if e.errno == errno.EEXIST: + six.raise_from( + EnvironmentError("cannot create lockfile in {path}".format(path=p)), e + ) + else: + raise + else: + pid = os.getpid() + spid = str(pid) + if not isinstance(spid, bytes): + spid = spid.encode("ascii") + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise EnvironmentError("lock path got renamed after sucessfull creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path, register=atexit.register): + """registers a cleanup function for removing a lock, by default on atexit""" + pid = os.getpid() + + def cleanup_on_exit(lock_path=lock_path, original_pid=pid): + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except (OSError, IOError): + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path): + """removes a numbered directory if its lock can be obtained""" + try: + create_cleanup_lock(path) + except (OSError, EnvironmentError): + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + return + parent = path.parent + + garbage = parent.joinpath("garbage-{}".format(uuid.uuid4())) + path.rename(garbage) + rmtree(garbage, force=True) + + +def ensure_deletable(path, consider_lock_dead_if_created_before): + """checks if a lock exists and breaks it if its considered dead""" + if 
path.is_symlink(): + return False + lock = get_lock_path(path) + if not lock.exists(): + return True + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + lock.unlink() + return True + else: + return False + + +def try_cleanup(path, consider_lock_dead_if_created_before): + """tries to cleanup a folder if we can ensure its deletable""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root, prefix, keep): + """lists candidates for numbered directories to be removed - follows py.path""" + max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + +def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before): + """cleanup for lock driven numbered directories""" + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + +def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout): + """creates a numbered dir with a cleanup lock and removes old ones""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix) + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + cleanup_numbered_dir( + root=root, + prefix=prefix, + keep=keep, + consider_lock_dead_if_created_before=consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input, root): + assert not isinstance(input, Path), "would break on py2" + root = Path(root) + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return root.joinpath(input) + + +def fnmatch_ex(pattern, path): + """FNMatcher port from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions + for each part of the path, while this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with + PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according + this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. 
+ pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = six.text_type(path) + return fnmatch.fnmatch(name, pattern) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c48482f038ad6598525d35d9fd079291c20ed26 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pathlib.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.py new file mode 100644 index 0000000000000000000000000000000000000000..ca24ff1b0f4a65c70fea5d0497650d353a0c9c83 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.py @@ -0,0 +1,1385 @@ +"""(disabled by default) support for testing pytest and pytest plugins.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import codecs +import gc +import os +import platform +import re +import subprocess +import sys +import time +import traceback +from fnmatch import fnmatch +from weakref import WeakKeyDictionary + +import py +import six + +import pytest +from _pytest._code import Source +from _pytest.assertion.rewrite import AssertionRewritingHook +from _pytest.capture import MultiCapture +from _pytest.capture import SysCapture +from _pytest.compat import safe_str +from _pytest.main import EXIT_INTERRUPTED +from _pytest.main import EXIT_OK +from _pytest.main import Session +from _pytest.pathlib import Path + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + u"/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser): + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="directory to take the pytester example files from" + ) + + +def pytest_configure(config): + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +def raise_on_kwargs(kwargs): + if kwargs: + raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs)))) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) + + def _parse_lsof_output(self, out): + def isopen(line): + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not in line + and "cwd" not in line + ) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split("\0") + fd = fields[0][1:] + filename = fields[1][1:] + if filename in IGNORE_PAM: + continue + if filename.startswith("/"): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self): + try: + py.process.cmdexec("lsof -v") + except (py.process.cmdexec.Error, UnicodeDecodeError): + # cmdexec may raise UnicodeDecodeError on 
Windows systems with + # locale other than English: + # https://bitbucket.org/pytest-dev/py/issues/66 + return False + else: + return True + + @pytest.hookimpl(hookwrapper=True, tryfirst=True) + def pytest_runtest_protocol(self, item): + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = {t[0] for t in lines2} - {t[0] for t in lines1} + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [] + error.append("***** %s FD leakage detected" % len(leaked_files)) + error.extend([str(f) for f in leaked_files]) + error.append("*** Before:") + error.extend([str(f) for f in lines1]) + error.append("*** After:") + error.extend([str(f) for f in lines2]) + error.append(error[0]) + error.append("*** function %s:%s: %s " % item.location) + error.append("See issue #2366") + item.warn(pytest.PytestWarning("\n".join(error))) + + +# XXX copied from execnet's conftest.py - needs to be merged +winpymap = { + "python2.7": r"C:\Python27\python.exe", + "python3.4": r"C:\Python34\python.exe", + "python3.5": r"C:\Python35\python.exe", + "python3.6": r"C:\Python36\python.exe", +} + + +def getexecutable(name, cache={}): + try: + return cache[name] + except KeyError: + executable = py.path.local.sysfind(name) + if executable: + import subprocess + + popen = subprocess.Popen( + [str(executable), "--version"], + universal_newlines=True, + stderr=subprocess.PIPE, + ) + out, err = popen.communicate() + if name == "jython": + if not err or "2.5" not in err: + executable = None + if "2.5.2" in err: + executable = None # http://bugs.jython.org/issue1790 + elif popen.returncode != 0: + # handle pyenv's 127 + executable = None + cache[name] = executable + return executable + + +@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"]) +def anypython(request): + name = request.param + executable = getexecutable(name) + if executable is None: + if sys.platform == "win32": + executable = winpymap.get(name, None) + if executable: + executable = py.path.local(executable) + if executable.check(): + return executable + pytest.skip("no suitable %s found" % (name,)) + return executable + + +# used at least by pytest-xdist plugin + + +@pytest.fixture +def _pytest(request): + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks. + + """ + return PytestArg(request) + + +class PytestArg(object): + def __init__(self, request): + self.request = request + + def gethookrecorder(self, hook): + hookrecorder = HookRecorder(hook._pm) + self.request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + + +def get_public_names(values): + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] + + +class ParsedCall(object): + def __init__(self, name, kwargs): + self.__dict__.update(kwargs) + self._name = name + + def __repr__(self): + d = self.__dict__.copy() + del d["_name"] + return "" % (self._name, d) + + +class HookRecorder(object): + """Record all hooks called in a plugin manager. + + This wraps all the hook calls in the plugin manager, recording each call + before propagating the normal calls. 
+ + """ + + def __init__(self, pluginmanager): + self._pluginmanager = pluginmanager + self.calls = [] + + def before(hook_name, hook_impls, kwargs): + self.calls.append(ParsedCall(hook_name, kwargs)) + + def after(outcome, hook_name, hook_impls, kwargs): + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self): + self._undo_wrapping() + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries): + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + pytest.fail("could not find %r check %r" % (name, check)) + + def popcall(self, name): + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = ["could not find call %r, in:" % (name,)] + lines.extend([" %s" % x for x in self.calls]) + pytest.fail("\n".join(lines)) + + def getcall(self, name): + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart="", + names="pytest_runtest_logreport pytest_collectreport", + when=None, + ): + """return a testreport whose dotted import path matches""" + values = [] + for rep in self.getreports(names=names): + try: + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + except AttributeError: + pass + if when and getattr(rep, "when", None) != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" 
% (inamepart,) + ) + if len(values) > 1: + raise ValueError( + "found 2 or more testreports matching %r: %s" % (inamepart, values) + ) + return values[0] + + def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"): + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self): + return self.getfailures("pytest_collectreport") + + def listoutcomes(self): + passed = [] + skipped = [] + failed = [] + for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"): + if rep.passed: + if getattr(rep, "when", None) == "call": + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + elif rep.failed: + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self): + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed=0, skipped=0, failed=0): + realpassed, realskipped, realfailed = self.listoutcomes() + assert passed == len(realpassed) + assert skipped == len(realskipped) + assert failed == len(realfailed) + + def clear(self): + self.calls[:] = [] + + +@pytest.fixture +def linecomp(request): + return LineComp() + + +@pytest.fixture(name="LineMatcher") +def LineMatcher_fixture(request): + return LineMatcher + + +@pytest.fixture +def testdir(request, tmpdir_factory): + return Testdir(request, tmpdir_factory) + + +rex_outcome = re.compile(r"(\d+) ([\w-]+)") + + +class RunResult(object): + """The result of running a command. + + Attributes: + + :ret: the return value + :outlines: list of lines captured from stdout + :errlines: list of lines captures from stderr + :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to + reconstruct stdout or the commonly used ``stdout.fnmatch_lines()`` + method + :stderr: :py:class:`LineMatcher` of stderr + :duration: duration in seconds + + """ + + def __init__(self, ret, outlines, errlines, duration): + self.ret = ret + self.outlines = outlines + self.errlines = errlines + self.stdout = LineMatcher(outlines) + self.stderr = LineMatcher(errlines) + self.duration = duration + + def parseoutcomes(self): + """Return a dictionary of outcomestring->num from parsing the terminal + output that the test process produced. + + """ + for line in reversed(self.outlines): + if "seconds" in line: + outcomes = rex_outcome.findall(line) + if outcomes: + d = {} + for num, cat in outcomes: + d[cat] = int(num) + return d + raise ValueError("Pytest terminal report not found") + + def assert_outcomes( + self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0 + ): + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. 
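``RunResult.parseoutcomes`` scrapes the final ``1 failed, 1 passed in ... seconds`` summary line, and ``assert_outcomes`` compares those counts against expectations. A typical use from a plugin's own test suite, assuming the ``pytester`` plugin is enabled so the ``testdir`` fixture is available::

    def test_outcome_counts(testdir):
        testdir.makepyfile(
            """
            def test_ok():
                assert True

            def test_broken():
                assert False
            """
        )
        result = testdir.runpytest()
        result.assert_outcomes(passed=1, failed=1)
        assert result.parseoutcomes()["failed"] == 1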
+ + """ + d = self.parseoutcomes() + obtained = { + "passed": d.get("passed", 0), + "skipped": d.get("skipped", 0), + "failed": d.get("failed", 0), + "error": d.get("error", 0), + "xpassed": d.get("xpassed", 0), + "xfailed": d.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "error": error, + "xpassed": xpassed, + "xfailed": xfailed, + } + assert obtained == expected + + +class CwdSnapshot(object): + def __init__(self): + self.__saved = os.getcwd() + + def restore(self): + os.chdir(self.__saved) + + +class SysModulesSnapshot(object): + def __init__(self, preserve=None): + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self): + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot(object): + def __init__(self): + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self): + sys.path[:], sys.meta_path[:] = self.__saved + + +class Testdir(object): + """Temporary test directory with tools to test/run pytest itself. + + This is based on the ``tmpdir`` fixture but provides a number of methods + which aid with testing pytest itself. Unless :py:meth:`chdir` is used all + methods will use :py:attr:`tmpdir` as their current working directory. + + Attributes: + + :tmpdir: The :py:class:`py.path.local` instance of the temporary directory. + + :plugins: A list of plugins to use with :py:meth:`parseconfig` and + :py:meth:`runpytest`. Initially this is an empty list but plugins can + be added to the list. The type of items to add to the list depends on + the method using them so refer to them for details. + + """ + + class TimeoutExpired(Exception): + pass + + def __init__(self, request, tmpdir_factory): + self.request = request + self._mod_collections = WeakKeyDictionary() + name = request.function.__name__ + self.tmpdir = tmpdir_factory.mktemp(name, numbered=True) + self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True) + os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot) + self.plugins = [] + self._cwd_snapshot = CwdSnapshot() + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self.chdir() + self.request.addfinalizer(self.finalize) + method = self.request.config.getoption("--runpytest") + if method == "inprocess": + self._runpytest_method = self.runpytest_inprocess + elif method == "subprocess": + self._runpytest_method = self.runpytest_subprocess + + def __repr__(self): + return "" % (self.tmpdir,) + + def finalize(self): + """Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. 
+ + """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + self._cwd_snapshot.restore() + os.environ.pop("PYTEST_DEBUG_TEMPROOT", None) + + def __take_sys_modules_snapshot(self): + # some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example + def preserve_module(name): + return name.startswith("zope") + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager): + """Create a new :py:class:`HookRecorder` for a PluginManager.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager) + self.request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self): + """Cd into the temporary directory. + + This is done automatically upon instantiation. + + """ + self.tmpdir.chdir() + + def _makefile(self, ext, args, kwargs, encoding="utf-8"): + items = list(kwargs.items()) + + def to_text(s): + return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s) + + if args: + source = u"\n".join(to_text(x) for x in args) + basename = self.request.function.__name__ + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.tmpdir.join(basename).new(ext=ext) + p.dirpath().ensure_dir() + source = Source(value) + source = u"\n".join(to_text(line) for line in source.lines) + p.write(source.strip().encode(encoding), "wb") + if ret is None: + ret = p + return ret + + def makefile(self, ext, *args, **kwargs): + r"""Create new file(s) in the testdir. + + :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`. + :param list[str] args: All args will be treated as strings and joined using newlines. + The result will be written as contents to the file. The name of the + file will be based on the test function requesting this fixture. + :param kwargs: Each keyword is the name of a file, while the value of it will + be written as contents of the file. + + Examples: + + .. code-block:: python + + testdir.makefile(".txt", "line1", "line2") + + testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source): + """Write a contest.py file with 'source' as contents.""" + return self.makepyfile(conftest=source) + + def makeini(self, source): + """Write a tox.ini file with 'source' as contents.""" + return self.makefile(".ini", tox=source) + + def getinicfg(self, source): + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return py.iniconfig.IniConfig(p)["pytest"] + + def makepyfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .py extension.""" + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs): + """Shortcut for .makefile() with a .txt extension.""" + return self._makefile(".txt", args, kwargs) + + def syspathinsert(self, path=None): + """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. + + This is undone automatically when this object dies at the end of each + test. 
+ + """ + if path is None: + path = self.tmpdir + sys.path.insert(0, str(path)) + # a call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches + self._possibly_invalidate_import_caches() + + def _possibly_invalidate_import_caches(self): + # invalidate caches if we can (py33 and above) + try: + import importlib + except ImportError: + pass + else: + if hasattr(importlib, "invalidate_caches"): + importlib.invalidate_caches() + + def mkdir(self, name): + """Create a new (sub)directory.""" + return self.tmpdir.mkdir(name) + + def mkpydir(self, name): + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a python package. + + """ + p = self.mkdir(name) + p.ensure("__init__.py") + return p + + def copy_example(self, name=None): + import warnings + from _pytest.warning_types import PYTESTER_COPY_EXAMPLE + + warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2) + example_dir = self.request.config.getini("pytester_example_dir") + if example_dir is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir = self.request.config.rootdir.join(example_dir) + + for extra_element in self.request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.join(*extra_element.args) + + if name is None: + func_name = self.request.function.__name__ + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.isdir(): + example_path = maybe_dir + elif maybe_file.isfile(): + example_path = maybe_file + else: + raise LookupError( + "{} cant be found as module or package in {}".format( + func_name, example_dir.bestrelpath(self.request.confg.rootdir) + ) + ) + else: + example_path = example_dir.join(name) + + if example_path.isdir() and not example_path.join("__init__.py").isfile(): + example_path.copy(self.tmpdir) + return self.tmpdir + elif example_path.isfile(): + result = self.tmpdir.join(example_path.basename) + example_path.copy(result) + return result + else: + raise LookupError( + 'example "{}" is not found as a file or directory'.format(example_path) + ) + + Session = Session + + def getnode(self, config, arg): + """Return the collection node of a file. + + :param config: :py:class:`_pytest.config.Config` instance, see + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the + configuration + + :param arg: a :py:class:`py.path.local` instance of the file + + """ + session = Session(config) + assert "::" not in str(arg) + p = py.path.local(arg) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def getpathnode(self, path): + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: a :py:class:`py.path.local` instance of the file + + """ + config = self.parseconfigure(path) + session = Session(config) + x = session.fspath.bestrelpath(path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) + return res + + def genitems(self, colitems): + """Generate all test items from a collection node. 
+ + This recurses into the collection node and returns a list of all the + test items contained within. + + """ + session = colitems[0].session + result = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source): + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. + + """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self.request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source, *cmdlineargs): + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: the source code of the test module + + :param cmdlineargs: any extra command line arguments to use + + :return: :py:class:`HookRecorder` instance of the result + + """ + p = self.makepyfile(source) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) + + def inline_genitems(self, *args): + """Run ``pytest.main(['--collectonly'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run(self, *args, **kwargs): + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: command line arguments to pass to :py:func:`pytest.main` + + :param plugin: (keyword-only) extra plugin instances the + ``pytest.main()`` instance should use + + :return: a :py:class:`HookRecorder` instance + + """ + finalizers = [] + try: + # When running pytest inline any plugins active in the main test + # process are already imported. So this disables the warning which + # will trigger to say they can no longer be rewritten, which is + # fine as they have already been rewritten. + orig_warn = AssertionRewritingHook._warn_already_imported + + def revert_warn_already_imported(): + AssertionRewritingHook._warn_already_imported = orig_warn + + finalizers.append(revert_warn_already_imported) + AssertionRewritingHook._warn_already_imported = lambda *a: None + + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. 
+ finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect(object): + def pytest_configure(x, config): + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins = kwargs.get("plugins") or [] + plugins.append(Collect()) + ret = pytest.main(list(args), plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec(object): + pass + + reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess(self, *args, **kwargs): + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. + + """ + if kwargs.get("syspathinsert"): + self.syspathinsert() + now = time.time() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + + class reprec(object): + ret = e.args[0] + + except Exception: + traceback.print_exc() + + class reprec(object): + ret = 3 + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) + res.reprec = reprec + return res + + def runpytest(self, *args, **kwargs): + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. + + """ + args = self._ensure_basetemp(args) + return self._runpytest_method(*args, **kwargs) + + def _ensure_basetemp(self, args): + args = list(args) + for x in args: + if safe_str(x).startswith("--basetemp"): + break + else: + args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) + return args + + def parseconfig(self, *args): + """Return a new pytest Config instance from given commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new + :py:class:`_pytest.config.Config` instance. + + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. + + """ + args = self._ensure_basetemp(args) + + import _pytest.config + + config = _pytest.config._prepareconfig(args, self.plugins) + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args): + """Return a new pytest configured Config instance. + + This returns a new :py:class:`_pytest.config.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. 
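``parseconfig`` builds a ``Config`` object from command line arguments without running a test session, while ``parseconfigure`` additionally fires the ``pytest_configure`` hook. A quick sketch of inspecting parsed options (using standard pytest option names)::

    def test_parsed_options(testdir):
        config = testdir.parseconfig("-v", "--color=no")
        assert config.getoption("verbose") == 1
        assert config.getoption("color") == "no"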
+ + """ + config = self.parseconfig(*args) + config._do_configure() + self.request.addfinalizer(config._ensure_unconfigure) + return config + + def getitem(self, source, funcname="test_func"): + """Return the test item for a test function. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: the module source + + :param funcname: the name of the test function for which to return a + test item + + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "%r item not found in module:\n%s\nitems: %s" % ( + funcname, + source, + items, + ) + + def getitems(self, source): + """Return all test items collected from the module. + + This writes the source to a python file and runs pytest's collection on + the resulting module, returning all test items contained within. + + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol(self, source, configargs=(), withinit=False): + """Return the module collection node for ``source``. + + This writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: the source code of the module to collect + + :param configargs: any extra arguments to pass to + :py:meth:`parseconfigure` + + :param withinit: whether to also write an ``__init__.py`` file to the + same directory to ensure it is a package + + """ + if isinstance(source, Path): + path = self.tmpdir.join(str(source)) + assert not withinit, "not supported for paths" + else: + kw = {self.request.function.__name__: Source(source).strip()} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol, name): + """Return the collection node for name from the module collection. + + This will search a module collection node for a collection node + matching the given name. + + :param modcol: a module collection node; see :py:meth:`getmodulecol` + + :param name: the name of the node to return + + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + + def popen(self, cmdargs, stdout, stderr, **kw): + """Invoke subprocess.Popen. + + This calls subprocess.Popen making sure the current working directory + is in the PYTHONPATH. + + You probably want to use :py:meth:`run` instead. + + """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + popen = subprocess.Popen( + cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw + ) + popen.stdin.close() + + return popen + + def run(self, *cmdargs, **kwargs): + """Run a command with arguments. + + Run a process using subprocess.Popen saving the stdout and stderr. + + :param args: the sequence of arguments to pass to `subprocess.Popen()` + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. 
+ + """ + __tracebackhide__ = True + + timeout = kwargs.pop("timeout", None) + raise_on_kwargs(kwargs) + + cmdargs = [ + str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs + ] + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") + print("running:", *cmdargs) + print(" in:", py.path.local()) + f1 = codecs.open(str(p1), "w", encoding="utf8") + f2 = codecs.open(str(p2), "w", encoding="utf8") + try: + now = time.time() + popen = self.popen( + cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") + ) + + def handle_timeout(): + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + elif six.PY3: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + else: + end = time.time() + timeout + + resolution = min(0.1, timeout / 10) + + while True: + ret = popen.poll() + if ret is not None: + break + + if time.time() > end: + handle_timeout() + + time.sleep(resolution) + finally: + f1.close() + f2.close() + f1 = codecs.open(str(p1), "r", encoding="utf8") + f2 = codecs.open(str(p2), "r", encoding="utf8") + try: + out = f1.read().splitlines() + err = f2.read().splitlines() + finally: + f1.close() + f2.close() + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + return RunResult(ret, out, err, time.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print("couldn't print to %s because of encoding" % (fp,)) + + def _getpytestargs(self): + return sys.executable, "-mpytest" + + def runpython(self, script): + """Run a python script using sys.executable as interpreter. + + Returns a :py:class:`RunResult`. + + """ + return self.run(sys.executable, script) + + def runpython_c(self, command): + """Run python -c "command", return a :py:class:`RunResult`.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess(self, *args, **kwargs): + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will added using the + ``-p`` command line option. Additionally ``--basetemp`` is used put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" so they do not conflict with the normal numbered + pytest location for temporary files and directories. + + :param args: the sequence of arguments to pass to the pytest subprocess + :param timeout: the period in seconds after which to timeout and raise + :py:class:`Testdir.TimeoutExpired` + + Returns a :py:class:`RunResult`. + + """ + __tracebackhide__ = True + + p = py.path.local.make_numbered_dir( + prefix="runpytest-", keep=None, rootdir=self.tmpdir + ) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=kwargs.get("timeout")) + + def spawn_pytest(self, string, expect_timeout=10.0): + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. 
+ + """ + basetemp = self.tmpdir.mkdir("temp-pexpect") + invoke = " ".join(map(str, self._getpytestargs())) + cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd, expect_timeout=10.0): + """Run a command using pexpect. + + The pexpect child is returned. + + """ + pexpect = pytest.importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + pytest.skip("pypy-64 bit not supported") + if sys.platform.startswith("freebsd"): + pytest.xfail("pexpect does not work reliably on freebsd") + logfile = self.tmpdir.join("spawn.out").open("wb") + child = pexpect.spawn(cmd, logfile=logfile) + self.request.addfinalizer(logfile.close) + child.timeout = expect_timeout + return child + + +def getdecoded(out): + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out), + ) + + +class LineComp(object): + def __init__(self): + self.stringio = py.io.TextIO() + + def assert_contains_lines(self, lines2): + """Assert that lines2 are contained (linearly) in lines1. + + Return a list of extralines found. + + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + return LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher(object): + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + + """ + + def __init__(self, lines): + self.lines = lines + self._log_output = [] + + def str(self): + """Return the entire original text.""" + return "\n".join(self.lines) + + def _getlines(self, lines2): + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2): + """Check lines exist in the output using in any order. + + Lines are checked using ``fnmatch.fnmatch``. The argument is a list of + lines which have to occur in the output, in any order. + + """ + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2): + """Check lines exist in the output using ``re.match``, in any order. + + The argument is a list of lines which have to occur in the output, in + any order. + + """ + self._match_lines_random(lines2, lambda name, pat: re.match(pat, name)) + + def _match_lines_random(self, lines2, match_func): + """Check lines exist in the output. + + The argument is a list of lines which have to occur in the output, in + any order. Each line can contain glob whildcards. + + """ + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + self._log("line %r not found in output" % line) + raise ValueError(self._log_text) + + def get_lines_after(self, fnline): + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. 
+ + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args): + self._log_output.append(" ".join((str(x) for x in args))) + + @property + def _log_text(self): + return "\n".join(self._log_output) + + def fnmatch_lines(self, lines2): + """Search captured text for matching lines using ``fnmatch.fnmatch``. + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch") + + def re_match_lines(self, lines2): + """Search captured text for matching lines using ``re.match``. + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also printed on stdout. + + """ + __tracebackhide__ = True + self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") + + def _match_lines(self, lines2, match_func, match_nickname): + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param list[str] lines2: list of string patterns to match. The actual + format depends on ``match_func`` + :param match_func: a callable ``match_func(line, pattern)`` where line + is the captured line from stdout/stderr and pattern is the matching + pattern + :param str match_nickname: the nickname for the match function that + will be logged to stdout when a match occurs + + """ + lines2 = self._getlines(lines2) + lines1 = self.lines[:] + nextline = None + extralines = [] + __tracebackhide__ = True + for line in lines2: + nomatchprinted = False + while lines1: + nextline = lines1.pop(0) + if line == nextline: + self._log("exact match:", repr(line)) + break + elif match_func(nextline, line): + self._log("%s:" % match_nickname, repr(line)) + self._log(" with:", repr(nextline)) + break + else: + if not nomatchprinted: + self._log("nomatch:", repr(line)) + nomatchprinted = True + self._log(" and:", repr(nextline)) + extralines.append(nextline) + else: + self._log("remains unmatched: %r" % (line,)) + pytest.fail(self._log_text) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.pyc new file mode 100644 index 0000000000000000000000000000000000000000..368a79a5a6729b5274e262138cccfff7e1ff6ff3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/pytester.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.py new file mode 100644 index 0000000000000000000000000000000000000000..6fd74acb1c909f0903f88c2d8ddc7e44d0f02fe3 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.py @@ -0,0 +1,1449 @@ +""" Python test discovery, setup and run of test functions. 
""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import fnmatch +import inspect +import os +import sys +import warnings +from textwrap import dedent + +import py +import six + +import _pytest +from _pytest import deprecated +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest.compat import ascii_escaped +from _pytest.compat import enum +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getfslineno +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import isclass +from _pytest.compat import isfunction +from _pytest.compat import NoneType +from _pytest.compat import NOTSET +from _pytest.compat import REGEX_TYPE +from _pytest.compat import safe_getattr +from _pytest.compat import safe_str +from _pytest.compat import STRING_TYPES +from _pytest.config import hookimpl +from _pytest.main import FSHookProxy +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import normalize_mark_list +from _pytest.mark.structures import transfer_markers +from _pytest.outcomes import fail +from _pytest.warning_types import PytestWarning +from _pytest.warning_types import RemovedInPytest4Warning + + +def pyobj_property(name): + def get(self): + node = self.getparent(getattr(__import__("pytest"), name)) + if node is not None: + return node.obj + + doc = "python %s object this node was collected from (can be None)." % ( + name.lower(), + ) + return property(get, None, None, doc) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="show fixtures per test", + ) + parser.addini( + "usefixtures", + type="args", + default=[], + help="list of default fixtures to be used with this project", + ) + parser.addini( + "python_files", + type="args", + default=["test_*.py", "*_test.py"], + help="glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="prefixes or glob names for Python test function and method discovery", + ) + + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append"], + dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.", + ) + + +def pytest_cmdline_main(config): + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + + +def pytest_generate_tests(metafunc): + # those alternative spellings are common - raise a specific error to alert + # the user + alt_spellings = ["parameterize", "parametrise", "parameterise"] + for attr in alt_spellings: + if hasattr(metafunc.function, attr): + msg = "{0} has '{1}' mark, spelling should be 'parametrize'" + fail(msg.format(metafunc.function.__name__, 
attr), pytrace=False) + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/latest/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/latest/fixture.html#usefixtures ", + ) + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem): + testfunction = pyfuncitem.obj + if pyfuncitem._isyieldedfunction(): + testfunction(*pyfuncitem._args) + else: + funcargs = pyfuncitem.funcargs + testargs = {} + for arg in pyfuncitem._fixtureinfo.argnames: + testargs[arg] = funcargs[arg] + testfunction(**testargs) + return True + + +def pytest_collect_file(path, parent): + ext = path.ext + if ext == ".py": + if not parent.session.isinitpath(path): + if not path_matches_patterns( + path, parent.config.getini("python_files") + ["__init__.py"] + ): + return + ihook = parent.session.gethookproxy(path) + return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + + +def path_matches_patterns(path, patterns): + """Returns True if the given py.path.local matches one of the patterns in the list of globs given""" + return any(path.fnmatch(pattern) for pattern in patterns) + + +def pytest_pycollect_makemodule(path, parent): + if path.basename == "__init__.py": + return Package(path, parent) + return Module(path, parent) + + +@hookimpl(hookwrapper=True) +def pytest_pycollect_makeitem(collector, name, obj): + outcome = yield + res = outcome.get_result() + if res is not None: + return + # nothing was collected elsewhere, let's do it here + if isclass(obj): + if collector.istestclass(obj, name): + Class = collector._getcustomclass("Class") + outcome.force_result(Class(name, parent=collector)) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a funtools.wrapped. + # We musn't if it's been wrapped with mock.patch (python 2 only) + if not (isfunction(obj) or isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestWarning( + "cannot collect %r because it is not a function." 
% name + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res = Generator(name, parent=collector) + else: + res = list(collector._genfunctions(name, obj)) + outcome.force_result(res) + + +def pytest_make_parametrize_id(config, val, argname=None): + return None + + +class PyobjContext(object): + module = pyobj_property("Module") + cls = pyobj_property("Class") + instance = pyobj_property("Instance") + + +class PyobjMixin(PyobjContext): + _ALLOW_MARKERS = True + + def __init__(self, *k, **kw): + super(PyobjMixin, self).__init__(*k, **kw) + + def obj(): + def fget(self): + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Instance collector marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + return obj + + def fset(self, value): + self._obj = value + + return property(fget, fset, None, "underlying python object") + + obj = obj() + + def _getobj(self): + return getattr(self.parent.obj, self.name) + + def getmodpath(self, stopatmodule=True, includemodule=False): + """ return python path relative to the containing module. """ + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + if isinstance(node, Instance): + continue + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + s = ".".join(parts) + return s.replace(".[", "[") + + def _getfslineno(self): + return getfslineno(self.obj) + + def reportinfo(self): + # XXX caching? + obj = self.obj + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + fspath = sys.modules[obj.__module__].__file__ + if fspath.endswith(".pyc"): + fspath = fspath[:-1] + lineno = compat_co_firstlineno + else: + fspath, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return fspath, lineno, modpath + + +class PyCollector(PyobjMixin, nodes.Collector): + def funcnamefilter(self, name): + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj): + """ Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name): + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj, name): + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # static methods need to be unwrapped + obj = safe_getattr(obj, "__func__", False) + return ( + safe_getattr(obj, "__call__", False) + and fixtures.getfixturemarker(obj) is None + ) + else: + return False + + def istestclass(self, obj, name): + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name, name): + """ + checks if the given name matches the prefix or glob-pattern defined + in ini configuration. 
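+
+        For example (illustrative values), with ``python_classes = Test *Suite``
+        the name ``TestFoo`` matches by the ``Test`` prefix and ``LoginSuite``
+        matches via the glob pattern ``*Suite``.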
+ """ + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self): + if not getattr(self.obj, "__test__", True): + return [] + + # NB. we avoid random getattrs and peek in the __dict__ instead + # (XXX originally introduced from a PyPy need, still true?) + dicts = [getattr(self.obj, "__dict__", {})] + for basecls in inspect.getmro(self.obj.__class__): + dicts.append(basecls.__dict__) + seen = {} + values = [] + for dic in dicts: + for name, obj in list(dic.items()): + if name in seen: + continue + seen[name] = True + res = self._makeitem(name, obj) + if res is None: + continue + if not isinstance(res, list): + res = [res] + values.extend(res) + values.sort(key=lambda item: item.reportinfo()[:2]) + return values + + def makeitem(self, name, obj): + warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2) + self._makeitem(name, obj) + + def _makeitem(self, name, obj): + # assert self.ihook.fspath == self.fspath, self + return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) + + def _genfunctions(self, name, funcobj): + module = self.getparent(Module).obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + transfer_markers(funcobj, cls, module) + fm = self.session._fixturemanager + + definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) + fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) + + metafunc = Metafunc( + definition, fixtureinfo, self.config, cls=cls, module=module + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + if methods: + self.ihook.pytest_generate_tests.call_extra( + methods, dict(metafunc=metafunc) + ) + else: + self.ihook.pytest_generate_tests(metafunc=metafunc) + + Function = self._getcustomclass("Function") + if not metafunc._calls: + yield Function(name, parent=self, fixtureinfo=fixtureinfo) + else: + # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs + fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) + + # add_funcarg_pseudo_fixture_def may have shadowed some fixtures + # with direct parametrization, so make sure we update what the + # function really needs. + fixtureinfo.prune_dependency_tree() + + for callspec in metafunc._calls: + subname = "%s[%s]" % (name, callspec.id) + yield Function( + name=subname, + parent=self, + callspec=callspec, + callobj=funcobj, + fixtureinfo=fixtureinfo, + keywords={callspec.id: True}, + originalname=name, + ) + + +class Module(nodes.File, PyCollector): + """ Collector for test classes and functions. 
""" + + def _getobj(self): + return self._importtestmodule() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Module, self).collect() + + def _importtestmodule(self): + # we assume we are only called once per module + importmode = self.config.getoption("--import-mode") + try: + mod = self.fspath.pyimport(ensuresyspath=importmode) + except SyntaxError: + raise self.CollectError( + _pytest._code.ExceptionInfo().getrepr(style="short") + ) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) + except ImportError: + from _pytest._code.code import ExceptionInfo + + exc_info = ExceptionInfo() + if self.config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = safe_str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{fspath}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) + ) + except _pytest.runner.Skipped as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." 
+ ) + self.config.pluginmanager.consider_module(mod) + return mod + + def setup(self): + setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule") + if setup_module is None: + setup_module = _get_xunit_setup_teardown(self.obj, "setup_module") + if setup_module is not None: + setup_module() + + teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule") + if teardown_module is None: + teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module") + if teardown_module is not None: + self.addfinalizer(teardown_module) + + +class Package(Module): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): + session = parent.session + nodes.FSCollector.__init__( + self, fspath, parent=parent, config=config, session=session, nodeid=nodeid + ) + self.name = fspath.dirname + self.trace = session.trace + self._norecursepatterns = session._norecursepatterns + self.fspath = fspath + + def _recurse(self, path): + ihook = self.gethookproxy(path.dirpath()) + if ihook.pytest_ignore_collect(path=path, config=self.config): + return False + for pat in self._norecursepatterns: + if path.check(fnmatch=pat): + return False + ihook = self.gethookproxy(path) + ihook.pytest_collect_directory(path=path, parent=self) + return True + + def gethookproxy(self, fspath): + # check if we have the common case of running + # hooks with all conftest.py filesall conftest.py + pm = self.config.pluginmanager + my_conftestmodules = pm._getconftestmodules(fspath) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # one or more conftests are not in use at this fspath + proxy = FSHookProxy(fspath, pm, remove_mods) + else: + # all plugis are active for this fspath + proxy = self.config.hook + return proxy + + def _collectfile(self, path): + ihook = self.gethookproxy(path) + if not self.isinitpath(path): + if ihook.pytest_ignore_collect(path=path, config=self.config): + return () + return ihook.pytest_collect_file(path=path, parent=self) + + def isinitpath(self, path): + return path in self.session._initialpaths + + def collect(self): + # XXX: HACK! + # Before starting to collect any files from this package we need + # to cleanup the duplicate paths added by the session's collect(). + # Proper fix is to not track these as duplicates in the first place. 
+ for path in list(self.session.config.pluginmanager._duplicatepaths): + # if path.parts()[:len(self.fspath.dirpath().parts())] == self.fspath.dirpath().parts(): + if path.dirname.startswith(self.name): + self.session.config.pluginmanager._duplicatepaths.remove(path) + + this_path = self.fspath.dirpath() + init_module = this_path.join("__init__.py") + if init_module.check(file=1) and path_matches_patterns( + init_module, self.config.getini("python_files") + ): + yield Module(init_module, self) + pkg_prefixes = set() + for path in this_path.visit(rec=self._recurse, bf=True, sort=True): + # we will visit our own __init__.py file, in which case we skip it + skip = False + if path.basename == "__init__.py" and path.dirpath() == this_path: + continue + + for pkg_prefix in pkg_prefixes: + if ( + pkg_prefix in path.parts() + and pkg_prefix.join("__init__.py") != path + ): + skip = True + + if skip: + continue + + if path.isdir() and path.join("__init__.py").check(file=1): + pkg_prefixes.add(path) + + for x in self._collectfile(path): + yield x + + +def _get_xunit_setup_teardown(holder, attr_name, param_obj=None): + """ + Return a callable to perform xunit-style setup or teardown if + the function exists in the ``holder`` object. + The ``param_obj`` parameter is the parameter which will be passed to the function + when the callable is called without arguments, defaults to the ``holder`` object. + Return ``None`` if a suitable callable is not found. + """ + param_obj = param_obj if param_obj is not None else holder + result = _get_xunit_func(holder, attr_name) + if result is not None: + arg_count = result.__code__.co_argcount + if inspect.ismethod(result): + arg_count -= 1 + if arg_count: + return lambda: result(param_obj) + else: + return result + + +def _get_xunit_func(obj, name): + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to + avoid calling it twice. + """ + meth = getattr(obj, name, None) + if fixtures.getfixturemarker(meth) is None: + return meth + + +class Class(PyCollector): + """ Collector for test methods. 
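+
+    Only classes matching the ``python_classes`` option and without an
+    ``__init__`` or ``__new__`` constructor are collected, e.g. (an
+    illustrative sketch)::
+
+        class TestMath(object):
+            def test_add(self):
+                assert 1 + 1 == 2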
""" + + def collect(self): + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + self.warn( + PytestWarning( + "cannot collect test class %r because it has a " + "__init__ constructor" % self.obj.__name__ + ) + ) + return [] + elif hasnew(self.obj): + self.warn( + PytestWarning( + "cannot collect test class %r because it has a " + "__new__ constructor" % self.obj.__name__ + ) + ) + return [] + return [self._getcustomclass("Instance")(name="()", parent=self)] + + def setup(self): + setup_class = _get_xunit_func(self.obj, "setup_class") + if setup_class is not None: + setup_class = getimfunc(setup_class) + setup_class(self.obj) + + fin_class = getattr(self.obj, "teardown_class", None) + if fin_class is not None: + fin_class = getimfunc(fin_class) + self.addfinalizer(lambda: fin_class(self.obj)) + + +class Instance(PyCollector): + _ALLOW_MARKERS = False # hack, destroy later + # instances share the object with their parents in a way + # that duplicates markers instances if not taken out + # can be removed at node strucutre reorganization time + + def _getobj(self): + return self.parent.obj() + + def collect(self): + self.session._fixturemanager.parsefactories(self) + return super(Instance, self).collect() + + def newinstance(self): + self.obj = self._getobj() + return self.obj + + +class FunctionMixin(PyobjMixin): + """ mixin for the code common to Function and Generator. + """ + + def setup(self): + """ perform setup for this test function. """ + if hasattr(self, "_preservedparent"): + obj = self._preservedparent + elif isinstance(self.parent, Instance): + obj = self.parent.newinstance() + self.obj = self._getobj() + else: + obj = self.parent.obj + if inspect.ismethod(self.obj): + setup_name = "setup_method" + teardown_name = "teardown_method" + else: + setup_name = "setup_function" + teardown_name = "teardown_function" + setup_func_or_method = _get_xunit_setup_teardown( + obj, setup_name, param_obj=self.obj + ) + if setup_func_or_method is not None: + setup_func_or_method() + teardown_func_or_method = _get_xunit_setup_teardown( + obj, teardown_name, param_obj=self.obj + ) + if teardown_func_or_method is not None: + self.addfinalizer(teardown_func_or_method) + + def _prunetraceback(self, excinfo): + if hasattr(self, "_obj") and not self.config.option.fulltrace: + code = _pytest._code.Code(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + + excinfo.traceback = ntraceback.filter() + # issue364: mark all but first and last frames to + # only show a single-line message for each frame + if self.config.option.tbstyle == "auto": + if len(excinfo.traceback) > 2: + for entry in excinfo.traceback[1:-1]: + entry.set_repr_style("short") + + def repr_failure(self, excinfo, outerr=None): + assert outerr is None, "XXX outerr usage is deprecated" + style = self.config.option.tbstyle + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class Generator(FunctionMixin, PyCollector): + def collect(self): + # test generators are seen as collectors but they also + # invoke setup/teardown on popular request + # (induced by the common "test_*" naming shared with normal tests) + from _pytest import deprecated + + 
self.session._setupstate.prepare(self) + # see FunctionMixin.setup and test_setupstate_is_preserved_134 + self._preservedparent = self.parent.obj + values = [] + seen = {} + for i, x in enumerate(self.obj()): + name, call, args = self.getcallargs(x) + if not callable(call): + raise TypeError("%r yielded non callable test %r" % (self.obj, call)) + if name is None: + name = "[%d]" % i + else: + name = "['%s']" % name + if name in seen: + raise ValueError( + "%r generated tests with non-unique name %r" % (self, name) + ) + seen[name] = True + with warnings.catch_warnings(): + # ignore our own deprecation warning + function_class = self.Function + values.append(function_class(name, self, args=args, callobj=call)) + self.warn(deprecated.YIELD_TESTS) + return values + + def getcallargs(self, obj): + if not isinstance(obj, (tuple, list)): + obj = (obj,) + # explicit naming + if isinstance(obj[0], six.string_types): + name = obj[0] + obj = obj[1:] + else: + name = None + call, args = obj[0], obj[1:] + return name, call, args + + +def hasinit(obj): + init = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + + +def hasnew(obj): + new = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + + +class CallSpec2(object): + def __init__(self, metafunc): + self.metafunc = metafunc + self.funcargs = {} + self._idlist = [] + self.params = {} + self._globalid = NOTSET + self._globalparam = NOTSET + self._arg2scopenum = {} # used for sorting parametrized resources + self.marks = [] + self.indices = {} + + def copy(self): + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs.marks.extend(self.marks) + cs.indices.update(self.indices) + cs._arg2scopenum.update(self._arg2scopenum) + cs._idlist = list(self._idlist) + cs._globalid = self._globalid + cs._globalparam = self._globalparam + return cs + + def _checkargnotcontained(self, arg): + if arg in self.params or arg in self.funcargs: + raise ValueError("duplicate %r" % (arg,)) + + def getparam(self, name): + try: + return self.params[name] + except KeyError: + if self._globalparam is NOTSET: + raise ValueError(name) + return self._globalparam + + @property + def id(self): + return "-".join(map(str, filter(None, self._idlist))) + + def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): + for arg, val in zip(argnames, valset): + self._checkargnotcontained(arg) + valtype_for_arg = valtypes[arg] + getattr(self, valtype_for_arg)[arg] = val + self.indices[arg] = param_index + self._arg2scopenum[arg] = scopenum + self._idlist.append(id) + self.marks.extend(normalize_mark_list(marks)) + + def setall(self, funcargs, id, param): + for x in funcargs: + self._checkargnotcontained(x) + self.funcargs.update(funcargs) + if id is not NOTSET: + self._idlist.append(id) + if param is not NOTSET: + assert self._globalparam is NOTSET + self._globalparam = param + for arg in funcargs: + self._arg2scopenum[arg] = fixtures.scopenum_function + + +class Metafunc(fixtures.FuncargnamesCompatAttr): + """ + Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. 
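+
+    A typical ``pytest_generate_tests`` hook sketch (the fixture name and the
+    parameter values are illustrative)::
+
+        def pytest_generate_tests(metafunc):
+            if "db_backend" in metafunc.fixturenames:
+                metafunc.parametrize("db_backend", ["sqlite", "postgres"])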
+ """ + + def __init__(self, definition, fixtureinfo, config, cls=None, module=None): + assert ( + isinstance(definition, FunctionDefinition) + or type(definition).__name__ == "DefinitionMock" + ) + self.definition = definition + + #: access to the :class:`_pytest.config.Config` object for the test session + self.config = config + + #: the module object where the test function is defined in. + self.module = module + + #: underlying python test function + self.function = definition.obj + + #: set of fixture names required by the test function + self.fixturenames = fixtureinfo.names_closure + + #: class object where the test function is defined in or ``None``. + self.cls = cls + + self._calls = [] + self._ids = set() + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): + """ Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather at test setup time. + + :arg argnames: a comma-separated string denoting one or more argument + names, or a list/tuple of argument strings. + + :arg argvalues: The list of argvalues determines how often a + test is invoked with different argument values. If only one + argname was specified argvalues is a list of values. If N + argnames were specified, argvalues must be a list of N-tuples, + where each tuple-element specifies a value for its respective + argname. + + :arg indirect: The list of argnames or boolean. A list of arguments' + names (subset of argnames). If True the list contains all names from + the argnames. Each argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :arg ids: list of string ids, or a callable. + If strings, each is corresponding to the argvalues so that they are + part of the test id. If None is given as id of specific test, the + automatically generated id for that argument will be used. + If callable, it should take one argument (a single argvalue) and return + a string or return None. If None, the automatically generated id for that + argument will be used. + If no ids are provided they will be generated automatically from + the argvalues. + + :arg scope: if specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. 
+ """ + from _pytest.fixtures import scope2index + from _pytest.mark import ParameterSet + + argnames, parameters = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + function_definition=self.definition, + ) + del argvalues + + if scope is None: + scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + arg_values_types = self._resolve_arg_value_types(argnames, indirect) + + ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition) + + scopenum = scope2index( + scope, descr="parametrize() call in {}".format(self.function.__name__) + ) + + # create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): + newcallspec = callspec.copy() + newcallspec.setmulti2( + arg_values_types, + argnames, + param_set.values, + param_id, + param_set.marks, + scopenum, + param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_arg_ids(self, argnames, ids, parameters, item): + """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given + to ``parametrize``. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param ids: the ids parameter of the parametrized call (see docs). + :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``. + :param Item item: the item that generated this parametrized call. + :rtype: List[str] + :return: the list of ids for each argname given + """ + from py.io import saferepr + + idfn = None + if callable(ids): + idfn = ids + ids = None + if ids: + func_name = self.function.__name__ + if len(ids) != len(parameters): + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False) + for id_value in ids: + if id_value is not None and not isinstance(id_value, six.string_types): + msg = "In {}: ids must be list of strings, found: {} (type: {!r})" + fail( + msg.format(func_name, saferepr(id_value), type(id_value)), + pytrace=False, + ) + ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item) + return ids + + def _resolve_arg_value_types(self, argnames, indirect): + """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg" + to the function, based on the ``indirect`` parameter of the parametrized() call. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :rtype: Dict[str, str] + A dict mapping each arg name to either: + * "params" if the argname should be the parameter of a fixture of the same name. + * "funcargs" if the argname should be a parameter to the parametrized test function. 
+ """ + valtypes = {} + if indirect is True: + valtypes = dict.fromkeys(argnames, "params") + elif indirect is False: + valtypes = dict.fromkeys(argnames, "funcargs") + elif isinstance(indirect, (tuple, list)): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + fail( + "In {}: indirect fixture '{}' doesn't exist".format( + self.function.__name__, arg + ), + pytrace=False, + ) + valtypes[arg] = "params" + return valtypes + + def _validate_if_using_arg_names(self, argnames, indirect): + """ + Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: list of argument names passed to ``parametrize()``. + :param indirect: same ``indirect`` parameter of ``parametrize()``. + :raise ValueError: if validation fails. + """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, (tuple, list)): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + "In {}: function uses no {} '{}'".format(func_name, name, arg), + pytrace=False, + ) + + def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): + """ Add a new call to the underlying test function during the collection phase of a test run. + + .. deprecated:: 3.3 + + Use :meth:`parametrize` instead. + + Note that request.addcall() is called during the test collection phase prior and + independently to actual test execution. You should only use addcall() + if you need to specify multiple arguments of a test function. + + :arg funcargs: argument keyword dictionary used when invoking + the test function. + + :arg id: used for reporting and identification purposes. If you + don't supply an `id` an automatic unique id will be generated. + + :arg param: a parameter which will be exposed to a later fixture function + invocation through the ``request.param`` attribute. + """ + warnings.warn(deprecated.METAFUNC_ADD_CALL, stacklevel=2) + + assert funcargs is None or isinstance(funcargs, dict) + if funcargs is not None: + for name in funcargs: + if name not in self.fixturenames: + fail("funcarg %r not used in this function." % name) + else: + funcargs = {} + if id is None: + raise ValueError("id=None not allowed") + if id is NOTSET: + id = len(self._calls) + id = str(id) + if id in self._ids: + raise ValueError("duplicate id %r" % id) + self._ids.add(id) + + cs = CallSpec2(self) + cs.setall(funcargs, id, param) + self._calls.append(cs) + + +def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. 
+ """ + from _pytest.fixtures import scopes + + if isinstance(indirect, (list, tuple)): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0].scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + if used_scopes: + # Takes the most narrow scope from used fixtures + for scope in reversed(scopes): + if scope in used_scopes: + return scope + + return "function" + + +def _idval(val, argname, idx, idfn, item, config): + if idfn: + s = None + try: + s = idfn(val) + except Exception as e: + # See issue https://github.com/pytest-dev/pytest/issues/2169 + msg = ( + "While trying to determine id of parameter {} at position " + "{} the following exception was raised:\n".format(argname, idx) + ) + msg += " {}: {}\n".format(type(e).__name__, e) + msg += "This warning will be an error error in pytest-4.0." + item.warn(RemovedInPytest4Warning(msg)) + if s: + return ascii_escaped(s) + + if config: + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname + ) + if hook_id: + return hook_id + + if isinstance(val, STRING_TYPES): + return ascii_escaped(val) + elif isinstance(val, (float, int, bool, NoneType)): + return str(val) + elif isinstance(val, REGEX_TYPE): + return ascii_escaped(val.pattern) + elif enum is not None and isinstance(val, enum.Enum): + return str(val) + elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): + return val.__name__ + return str(argname) + str(idx) + + +def _idvalset(idx, parameterset, argnames, idfn, ids, item, config): + if parameterset.id is not None: + return parameterset.id + if ids is None or (idx >= len(ids) or ids[idx] is None): + this_id = [ + _idval(val, argname, idx, idfn, item=item, config=config) + for val, argname in zip(parameterset.values, argnames) + ] + return "-".join(this_id) + else: + return ascii_escaped(ids[idx]) + + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None): + ids = [ + _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item) + for valindex, parameterset in enumerate(parametersets) + ] + if len(set(ids)) != len(ids): + # The ids are not unique + duplicates = [testid for testid in ids if ids.count(testid) > 1] + counters = collections.defaultdict(lambda: 0) + for index, testid in enumerate(ids): + if testid in duplicates: + ids[index] = testid + str(counters[testid]) + counters[testid] += 1 + return ids + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func): + loc = getlocation(func, curdir) + return curdir.bestrelpath(loc) + + def write_fixture(fixture_def): + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + if verbose > 0: + bestrel = get_best_relpath(fixture_def.func) + funcargspec = "{} -- {}".format(argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + fixture_doc = fixture_def.func.__doc__ + if fixture_doc: + write_docstring(tw, fixture_doc) + else: + tw.line(" no docstring available", red=True) + + def write_item(item): + try: + info = 
item._fixtureinfo + except AttributeError: + # doctests items have no _fixtureinfo attribute + return + if not info.name2fixturedefs: + # this test item does not use any fixtures + return + tw.line() + tw.sep("-", "fixtures used by {}".format(item.name)) + tw.sep("-", "({})".format(get_best_relpath(item.function))) + # dict key not used in loop but needed for sorting + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # last item is expected to be the one used by the test item + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config): + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config, session): + import _pytest.config + + session.perform_collect() + curdir = py.path.local() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, curdir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + curdir.bestrelpath(loc), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, bestrel, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", "fixtures defined from %s" % (module,)) + currentmodule = module + if verbose <= 0 and argname[0] == "_": + continue + if verbose > 0: + funcargspec = "%s -- %s" % (argname, bestrel) + else: + funcargspec = argname + tw.line(funcargspec, green=True) + loc = getlocation(fixturedef.func, curdir) + doc = fixturedef.func.__doc__ or "" + if doc: + write_docstring(tw, doc) + else: + tw.line(" %s: no docstring available" % (loc,), red=True) + + +def write_docstring(tw, doc): + INDENT = " " + doc = doc.rstrip() + if "\n" in doc: + firstline, rest = doc.split("\n", 1) + else: + firstline, rest = doc, "" + + if firstline.strip(): + tw.line(INDENT + firstline.strip()) + + if rest: + for line in dedent(rest).split("\n"): + tw.write(INDENT + line + "\n") + + +class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): + """ a Function Item is responsible for setting up and executing a + Python test function. 
+ """ + + _genid = None + # disable since functions handle it themselfes + _ALLOW_MARKERS = False + + def __init__( + self, + name, + parent, + args=None, + config=None, + callspec=None, + callobj=NOTSET, + keywords=None, + session=None, + fixtureinfo=None, + originalname=None, + ): + super(Function, self).__init__(name, parent, config=config, session=session) + self._args = args + if callobj is not NOTSET: + self.obj = callobj + + self.keywords.update(self.obj.__dict__) + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + # this is total hostile and a mess + # keywords are broken by design by now + # this will be redeemed later + for mark in callspec.marks: + # feel free to cry, this was broken for years before + # and keywords cant fix it per design + self.keywords[mark.name] = mark + self.own_markers.extend(normalize_mark_list(callspec.marks)) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=not self._isyieldedfunction() + ) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + #: original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname + + def _initrequest(self): + self.funcargs = {} + if self._isyieldedfunction(): + assert not hasattr( + self, "callspec" + ), "yielded functions (deprecated) cannot have funcargs" + else: + if hasattr(self, "callspec"): + callspec = self.callspec + assert not callspec.funcargs + self._genid = callspec.id + if hasattr(callspec, "param"): + self.param = callspec.param + self._request = fixtures.FixtureRequest(self) + + @property + def function(self): + "underlying python 'function' object" + return getimfunc(self.obj) + + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + + @property + def _pyfuncitem(self): + "(compatonly) for code expecting pytest-2.2 style request objects" + return self + + def _isyieldedfunction(self): + return getattr(self, "_args", None) is not None + + def runtest(self): + """ execute the underlying test function. 
""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self): + super(Function, self).setup() + fixtures.fillfixtures(self) + + +class FunctionDefinition(Function): + """ + internal hack until we get actual definition nodes instead of the + crappy metafunc hack + """ + + def runtest(self): + raise RuntimeError("function definitions are not supposed to be used") + + setup = runtest diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.pyc new file mode 100644 index 0000000000000000000000000000000000000000..368c5a7029d3a6ed4338b5cb191c1f5525c5db73 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.py new file mode 100644 index 0000000000000000000000000000000000000000..805cd85ad4180403887e594350fc6a6b07acec58 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.py @@ -0,0 +1,721 @@ +import math +import pprint +import sys +from decimal import Decimal +from numbers import Number + +import six +from more_itertools.more import always_iterable +from six.moves import filterfalse +from six.moves import zip + +import _pytest._code +from _pytest.compat import isclass +from _pytest.compat import Mapping +from _pytest.compat import Sequence +from _pytest.compat import STRING_TYPES +from _pytest.outcomes import fail + +BASE_TYPE = (type, STRING_TYPES) + + +def _cmp_raises_type_error(self, other): + """__cmp__ implementation which raises TypeError. Used + by Approx base classes to implement only == and != and raise a + TypeError for other comparisons. + + Needed in Python 2 only, Python 3 all it takes is not implementing the + other operators at all. + """ + __tracebackhide__ = True + raise TypeError( + "Comparison operators other than == and != not supported by approx objects" + ) + + +def _non_numeric_type_error(value, at): + at_str = " at {}".format(at) if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +# builtin pytest.approx helper + + +class ApproxBase(object): + """ + Provide shared utilities for making approximate comparisons between numbers + or sequences of numbers. + """ + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok=False): + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self): + raise NotImplementedError + + def __eq__(self, actual): + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def _approx_scalar(self, x): + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """ + Yield all the pairs of numbers to be compared. This is used to + implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self): + """ + Raise a TypeError if the expected value is not a valid type. 
+ """ + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + pass + + +def _recursive_list_map(f, x): + if isinstance(x, list): + return list(_recursive_list_map(f, xi) for xi in x) + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """ + Perform approximate comparisons where the expected value is numpy array. + """ + + def __repr__(self): + list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) + return "approx({!r})".format(list_scalars) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def __eq__(self, actual): + import numpy as np + + # self.expected is supposed to always be an array here + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except: # noqa + raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. + + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, np.asscalar(self.expected[i]) + else: + for i in np.ndindex(self.expected.shape): + yield np.asscalar(actual[i]), np.asscalar(self.expected[i]) + + +class ApproxMapping(ApproxBase): + """ + Perform approximate comparisons where the expected value is a mapping with + numeric values (the keys can be anything). + """ + + def __repr__(self): + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) + + def __eq__(self, actual): + if set(actual.keys()) != set(self.expected.keys()): + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self): + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + elif not isinstance(value, Number): + raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) + + +class ApproxSequence(ApproxBase): + """ + Perform approximate comparisons where the expected value is a sequence of + numbers. 
+ """ + + def __repr__(self): + seq_type = type(self.expected) + if seq_type not in (tuple, list, set): + seq_type = list + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) + + def __eq__(self, actual): + if len(actual) != len(self.expected): + return False + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self): + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + elif not isinstance(x, Number): + raise _non_numeric_type_error( + self.expected, at="index {}".format(index) + ) + + +class ApproxScalar(ApproxBase): + """ + Perform approximate comparisons where the expected value is a single number. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 + DEFAULT_RELATIVE_TOLERANCE = 1e-6 + + def __repr__(self): + """ + Return a string communicating both the expected value and the tolerance + for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode + plus/minus symbol if this is python3 (it's too hard to get right for + python2). + """ + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = "{:.1e}".format(self.tolerance) + except ValueError: + vetted_tolerance = "???" + + if sys.version_info[0] == 2: + return "{} +- {}".format(self.expected, vetted_tolerance) + else: + return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + """ + Return true if the given value is equal to the expected value within + the pre-specified tolerance. + """ + if _is_numpy_array(actual): + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in actual.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + @property + def tolerance(self): + """ + Return the tolerance for the comparison. This could be either an + absolute tolerance or a relative tolerance, depending on what the user + specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. 
``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + "absolute tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + "relative tolerance can't be negative: {}".format(absolute_tolerance) + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """ + Perform approximate comparisons where the expected value is a decimal. + """ + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok=False): + """ + Assert that two numbers (or two sets of numbers) are equal to each other + within some tolerance. + + Due to the `intricacies of floating-point arithmetic`__, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + __ https://docs.python.org/3/tutorial/floatingpoint.html + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. 
+ + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + Dictionary *values*:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. All of + these algorithms are based on relative and absolute tolerances and should + agree for the most part, but they do have meaningful differences: + + - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative + tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute + tolerance is met. Because the relative tolerance is calculated w.r.t. + both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor + ``b`` is a "reference value"). You have to specify an absolute tolerance + if you want to compare to ``0.0`` because there is no tolerance by + default. Only available in python>=3.5. `More information...`__ + + __ https://docs.python.org/3/library/math.html#math.isclose + + - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference + between ``a`` and ``b`` is less that the sum of the relative tolerance + w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance + is only calculated w.r.t. ``b``, this test is asymmetric and you can + think of ``b`` as the reference value. 
Support for comparing sequences + is provided by ``numpy.allclose``. `More information...`__ + + __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html + + - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` + are within an absolute tolerance of ``1e-7``. No relative tolerance is + considered and the absolute tolerance cannot be changed, so this function + is not appropriate for very large or very small numbers. Also, it's only + available in subclasses of ``unittest.TestCase`` and it's ugly because it + doesn't follow PEP8. `More information...`__ + + __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual + + - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative + tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. + Because the relative tolerance is only calculated w.r.t. ``b``, this test + is asymmetric and you can think of ``b`` as the reference value. In the + special case that you explicitly specify an absolute tolerance but not a + relative tolerance, only the absolute tolerance is considered. + + .. warning:: + + .. versionchanged:: 3.2 + + In order to avoid inconsistent behavior, ``TypeError`` is + raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. + The example below illustrates the problem:: + + assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) + assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) + + In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` + to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to + comparison. This is because the call hierarchy of rich comparisons + follows a fixed behavior. `More information...`__ + + __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ + """ + + # Delegate the comparison to a class that knows how to deal with the type + # of the expected value (e.g. int, float, list, dict, numpy.array, etc). + # + # The primary responsibility of these classes is to implement ``__eq__()`` + # and ``__repr__()``. The former is used to actually check if some + # "actual" value is equivalent to the given expected value within the + # allowed tolerance. The latter is used to show the user the expected + # value and tolerance, in the case that a test failed. + # + # The actual logic for making approximate comparisons can be found in + # ApproxScalar, which is used to compare individual numbers. All of the + # other Approx classes eventually delegate to this class. The ApproxBase + # class provides some convenient methods and overloads, but isn't really + # essential. + + __tracebackhide__ = True + + if isinstance(expected, Decimal): + cls = ApproxDecimal + elif isinstance(expected, Number): + cls = ApproxScalar + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES): + cls = ApproxSequence + elif _is_numpy_array(expected): + cls = ApproxNumpy + else: + raise _non_numeric_type_error(expected, at=None) + + return cls(expected, rel, abs, nan_ok) + + +def _is_numpy_array(obj): + """ + Return true if the given object is a numpy array. Make a special effort to + avoid importing numpy unless it's really necessary. 
+ """ + import sys + + np = sys.modules.get("numpy") + if np is not None: + return isinstance(obj, np.ndarray) + return False + + +# builtin pytest.raises helper + + +def raises(expected_exception, *args, **kwargs): + r""" + Assert that a code block/function call raises ``expected_exception`` + and raise a failure exception otherwise. + + :arg message: if specified, provides a custom failure message if the + exception is not raised + :arg match: if specified, asserts that the exception matches a text or regex + + This helper produces a ``ExceptionInfo()`` object (see below). + + You may use this function as a context manager:: + + >>> with raises(ZeroDivisionError): + ... 1/0 + + .. versionchanged:: 2.10 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message:: + + >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): + ... pass + Traceback (most recent call last): + ... + Failed: Expecting ZeroDivisionError + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type == ValueError # this will not execute + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type == ValueError + + + Since version ``3.1`` you can use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + **Legacy forms** + + The forms below are fully supported but are discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... + >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + It is also possible to pass a string to be evaluated at runtime:: + + >>> raises(ZeroDivisionError, "f(0)") + + + The string will be evaluated using the same ``locals()`` and ``globals()`` + at the moment of the ``raises`` call. + + .. currentmodule:: _pytest._code + + Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. 
See the + official Python ``try`` statement documentation for more detailed + information. + + """ + __tracebackhide__ = True + for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): + msg = ( + "exceptions must be old-style classes or" + " derived from BaseException, not %s" + ) + raise TypeError(msg % type(exc)) + + message = "DID NOT RAISE {}".format(expected_exception) + match_expr = None + + if not args: + if "message" in kwargs: + message = kwargs.pop("message") + if "match" in kwargs: + match_expr = kwargs.pop("match") + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(kwargs.keys()) + raise TypeError(msg) + return RaisesContext(expected_exception, message, match_expr) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + # print "raises frame scope: %r" % frame.f_locals + try: + code = _pytest._code.Source(code).compile() + six.exec_(code, frame.f_globals, loc) + # XXX didn't mean f_globals == f_locals something special? + # this is destroyed here ... + except expected_exception: + return _pytest._code.ExceptionInfo() + else: + func = args[0] + try: + func(*args[1:], **kwargs) + except expected_exception: + return _pytest._code.ExceptionInfo() + fail(message) + + +raises.Exception = fail.Exception + + +class RaisesContext(object): + def __init__(self, expected_exception, message, match_expr): + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo = None + + def __enter__(self): + self.excinfo = object.__new__(_pytest._code.ExceptionInfo) + return self.excinfo + + def __exit__(self, *tp): + __tracebackhide__ = True + if tp[0] is None: + fail(self.message) + self.excinfo.__init__(tp) + suppress_exception = issubclass(self.excinfo.type, self.expected_exception) + if sys.version_info[0] == 2 and suppress_exception: + sys.exc_clear() + if self.match_expr and suppress_exception: + self.excinfo.match(self.match_expr) + return suppress_exception diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b83adfce3ba2a3c38a006d044149375b6e208a5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/python_api.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000000000000000000000000000000000000..4f3ab7f2997ae0a3f9af1ffc5b0225e0bf16b921 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.py @@ -0,0 +1,241 @@ +""" recording warnings during test function execution. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import inspect +import re +import sys +import warnings + +import six + +import _pytest._code +from _pytest.fixtures import yield_fixture +from _pytest.outcomes import fail + + +@yield_fixture +def recwarn(): + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See http://docs.python.org/library/warnings.html for information + on warning categories. 
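A minimal usage sketch (``test_hello`` is a hypothetical test function)::

    import warnings

    def test_hello(recwarn):
        warnings.warn("hello", UserWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(UserWarning)
        assert issubclass(w.category, UserWarning)
        assert str(w.message) == "hello"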
+ """ + wrec = WarningsRecorder() + with wrec: + warnings.simplefilter("default") + yield wrec + + +def deprecated_call(func=None, *args, **kwargs): + """context manager that can be used to ensure a block of code triggers a + ``DeprecationWarning`` or ``PendingDeprecationWarning``:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> with deprecated_call(): + ... assert api_call_v2() == 200 + + ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings + types above. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +def warns(expected_warning, *args, **kwargs): + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + + This function can be used as a context manager, or any of the other ways + ``pytest.raises`` can be used:: + + >>> with warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the exception matches a text or regex:: + + >>> with warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... + + """ + __tracebackhide__ = True + match_expr = None + if not args: + if "match" in kwargs: + match_expr = kwargs.pop("match") + return WarningsChecker(expected_warning, match_expr=match_expr) + elif isinstance(args[0], str): + code, = args + assert isinstance(code, str) + frame = sys._getframe(1) + loc = frame.f_locals.copy() + loc.update(kwargs) + + with WarningsChecker(expected_warning, match_expr=match_expr): + code = _pytest._code.Source(code).compile() + six.exec_(code, frame.f_globals, loc) + else: + func = args[0] + with WarningsChecker(expected_warning, match_expr=match_expr): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): + """A context manager to record raised warnings. + + Adapted from `warnings.catch_warnings`. 
+ """ + + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) + self._entered = False + self._list = [] + + @property + def list(self): + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i): + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self): + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self): + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls=Warning): + """Pop the first recorded warning, raise exception if not exists.""" + for i, w in enumerate(self._list): + if issubclass(w.category, cls): + return self._list.pop(i) + __tracebackhide__ = True + raise AssertionError("%r not found in warning list" % cls) + + def clear(self): + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self): + if self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot enter %r twice" % self) + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter("always") + # python3 keeps track of a "filter version", when the filters are + # updated previously seen warnings can be re-warned. python2 has no + # concept of this so we must reset the warnings registry manually. + # trivial patching of `warnings.warn` seems to be enough somehow? + if six.PY2: + + def warn(message, category=None, stacklevel=1): + # duplicate the stdlib logic due to + # bad handing in the c version of warnings + if isinstance(message, Warning): + category = message.__class__ + # Check category argument + if category is None: + category = UserWarning + assert issubclass(category, Warning) + + # emulate resetting the warn registry + f_globals = sys._getframe(stacklevel).f_globals + if "__warningregistry__" in f_globals: + orig = f_globals["__warningregistry__"] + f_globals["__warningregistry__"] = None + try: + return self._saved_warn(message, category, stacklevel + 1) + finally: + f_globals["__warningregistry__"] = orig + else: + return self._saved_warn(message, category, stacklevel + 1) + + warnings.warn, self._saved_warn = warn, warnings.warn + return self + + def __exit__(self, *exc_info): + if not self._entered: + __tracebackhide__ = True + raise RuntimeError("Cannot exit %r without entering first" % self) + # see above where `self._saved_warn` is assigned + if six.PY2: + warnings.warn = self._saved_warn + super(WarningsRecorder, self).__exit__(*exc_info) + + +class WarningsChecker(WarningsRecorder): + def __init__(self, expected_warning=None, match_expr=None): + super(WarningsChecker, self).__init__() + + msg = "exceptions must be old-style classes or derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not inspect.isclass(exc): + raise TypeError(msg % type(exc)) + elif inspect.isclass(expected_warning): + expected_warning = (expected_warning,) + elif expected_warning is not None: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning + self.match_expr = match_expr + + def __exit__(self, *exc_info): + super(WarningsChecker, self).__exit__(*exc_info) + + __tracebackhide__ = True + + # only check if we're not currently handling an exception + if all(a is None for a in exc_info): + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + "DID NOT WARN. No warnings of type {} was emitted. 
" + "The list of emitted warnings is: {}.".format( + self.expected_warning, [each.message for each in self] + ) + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + "DID NOT WARN. No warnings of type {} matching" + " ('{}') was emitted. The list of emitted warnings" + " is: {}.".format( + self.expected_warning, + self.match_expr, + [each.message for each in self], + ) + ) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54300c0ee7ccf194fb3f906ad2decc5ec613ff83 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/recwarn.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.py new file mode 100644 index 0000000000000000000000000000000000000000..b2010cc2e9dfbdddca546cdbff4aa3fc75f0035e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.py @@ -0,0 +1,197 @@ +import py + +from _pytest._code.code import TerminalRepr + + +def getslaveinfoline(node): + try: + return node._slaveinfocache + except AttributeError: + d = node.slaveinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( + d["id"], + d["sysplatform"], + ver, + d["executable"], + ) + return s + + +class BaseReport(object): + def __init__(self, **kw): + self.__dict__.update(kw) + + def toterminal(self, out): + if hasattr(self, "node"): + out.line(getslaveinfoline(self.node)) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr.toterminal(out) + else: + try: + out.line(longrepr) + except UnicodeEncodeError: + out.line("") + + def get_sections(self, prefix): + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self): + """ + Read-only property that returns the full string representation + of ``longrepr``. + + .. versionadded:: 3.0 + """ + tw = py.io.TerminalWriter(stringio=True) + tw.hasmarkup = False + self.toterminal(tw) + exc = tw.stringio.getvalue() + return exc.strip() + + @property + def caplog(self): + """Return captured log lines, if log capturing is enabled + + .. versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self): + """Return captured text from stdout, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self): + """Return captured text from stderr, if capturing is enabled + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + passed = property(lambda x: x.outcome == "passed") + failed = property(lambda x: x.outcome == "failed") + skipped = property(lambda x: x.outcome == "skipped") + + @property + def fspath(self): + return self.nodeid.split("::")[0] + + +class TestReport(BaseReport): + """ Basic test report object (also used for setup and teardown calls if + they fail). 
+ """ + + def __init__( + self, + nodeid, + location, + keywords, + outcome, + longrepr, + when, + sections=(), + duration=0, + user_properties=None, + **extra + ): + #: normalized collection node id + self.nodeid = nodeid + + #: a (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + self.location = location + + #: a name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords = keywords + + #: test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties of the test + self.user_properties = list(user_properties or []) + + #: list of pairs ``(str, str)`` of extra information which needs to + #: marshallable. Used by pytest to add captured text + #: from ``stdout`` and ``stderr``, but may be used by other plugins + #: to add arbitrary information to reports. + self.sections = list(sections) + + #: time it took to run just the test + self.duration = duration + + self.__dict__.update(extra) + + def __repr__(self): + return "" % ( + self.nodeid, + self.when, + self.outcome, + ) + + +class TeardownErrorReport(BaseReport): + outcome = "failed" + when = "teardown" + + def __init__(self, longrepr, **extra): + self.longrepr = longrepr + self.sections = [] + self.__dict__.update(extra) + + +class CollectReport(BaseReport): + def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): + self.nodeid = nodeid + self.outcome = outcome + self.longrepr = longrepr + self.result = result or [] + self.sections = list(sections) + self.__dict__.update(extra) + + @property + def location(self): + return (self.fspath, None, self.fspath) + + def __repr__(self): + return "" % ( + self.nodeid, + len(self.result), + self.outcome, + ) + + +class CollectErrorRepr(TerminalRepr): + def __init__(self, msg): + self.longrepr = msg + + def toterminal(self, out): + out.line(self.longrepr, red=True) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e054d525612d9a2387aa53a97f872415612918eb Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/reports.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.py new file mode 100644 index 0000000000000000000000000000000000000000..3efdbea6e05196fd05b4c1efedf602d6fc78de47 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.py @@ -0,0 +1,123 @@ +""" log machine-parseable test session result information in a plain +text file. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import py + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "resultlog plugin options") + group.addoption( + "--resultlog", + "--result-log", + action="store", + metavar="path", + default=None, + help="DEPRECATED path for machine-readable result log.", + ) + + +def pytest_configure(config): + resultlog = config.option.resultlog + # prevent opening resultlog on slave nodes (xdist) + if resultlog and not hasattr(config, "slaveinput"): + dirname = os.path.dirname(os.path.abspath(resultlog)) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logfile = open(resultlog, "w", 1) # line buffered + config._resultlog = ResultLog(config, logfile) + config.pluginmanager.register(config._resultlog) + + from _pytest.deprecated import RESULT_LOG + from _pytest.warnings import _issue_config_warning + + _issue_config_warning(RESULT_LOG, config) + + +def pytest_unconfigure(config): + resultlog = getattr(config, "_resultlog", None) + if resultlog: + resultlog.logfile.close() + del config._resultlog + config.pluginmanager.unregister(resultlog) + + +def generic_path(item): + chain = item.listchain() + gpath = [chain[0].name] + fspath = chain[0].fspath + fspart = False + for node in chain[1:]: + newfspath = node.fspath + if newfspath == fspath: + if fspart: + gpath.append(":") + fspart = False + else: + gpath.append(".") + else: + gpath.append("/") + fspart = True + name = node.name + if name[0] in "([": + gpath.pop() + gpath.append(name) + fspath = newfspath + return "".join(gpath) + + +class ResultLog(object): + def __init__(self, config, logfile): + self.config = config + self.logfile = logfile # preferably line buffered + + def write_log_entry(self, testpath, lettercode, longrepr): + print("%s %s" % (lettercode, testpath), file=self.logfile) + for line in longrepr.splitlines(): + print(" %s" % line, file=self.logfile) + + def log_outcome(self, report, lettercode, longrepr): + testpath = getattr(report, "nodeid", None) + if testpath is None: + testpath = report.fspath + self.write_log_entry(testpath, lettercode, longrepr) + + def pytest_runtest_logreport(self, report): + if report.when != "call" and report.passed: + return + res = self.config.hook.pytest_report_teststatus(report=report) + code = res[1] + if code == "x": + longrepr = str(report.longrepr) + elif code == "X": + longrepr = "" + elif report.passed: + longrepr = "" + elif report.failed: + longrepr = str(report.longrepr) + elif report.skipped: + longrepr = str(report.longrepr[2]) + self.log_outcome(report, code, longrepr) + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + code = "F" + longrepr = str(report.longrepr) + else: + assert report.skipped + code = "S" + longrepr = "%s:%d: %s" % report.longrepr + self.log_outcome(report, code, longrepr) + + def pytest_internalerror(self, excrepr): + reprcrash = getattr(excrepr, "reprcrash", None) + path = getattr(reprcrash, "path", None) + if path is None: + path = "cwd:%s" % py.path.local() + self.write_log_entry(path, "!", str(excrepr)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdb85f6dce60de50b88cb47ea4e8475f9d419dfe Binary files /dev/null and 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/resultlog.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4b06d7cb3b6e1042bf85de67682200a6a79898 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.py @@ -0,0 +1,394 @@ +""" basic collect and runtest protocol implementations """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import bdb +import os +import sys +from time import time + +import six + +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest._code.code import ExceptionInfo +from _pytest.outcomes import skip +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + +# +# pytest plugin hooks + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="show N slowest setup/test durations (N=0 for all).", + ), + + +def pytest_terminal_summary(terminalreporter): + durations = terminalreporter.config.option.durations + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration) + dlist.reverse() + if not durations: + tr.write_sep("=", "slowest test durations") + else: + tr.write_sep("=", "slowest %s test durations" % durations) + dlist = dlist[:durations] + + for rep in dlist: + if verbose < 2 and rep.duration < 0.005: + tr.write_line("") + tr.write_line("(0.00 durations hidden. 
Use -vv to show these durations.)") + break + nodeid = rep.nodeid.replace("::()::", "::") + tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid)) + + +def pytest_sessionstart(session): + session._setupstate = SetupState() + + +def pytest_sessionfinish(session): + session._setupstate.teardown_all() + + +def pytest_runtest_protocol(item, nextitem): + item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol(item, log=True, nextitem=None): + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: + item._initrequest() + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.option.setupshow: + show_test_item(item) + if not item.config.option.setuponly: + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # after all teardown hooks have been called + # want funcargs and request info to go away + if hasrequest: + item._request = False + item.funcargs = None + return reports + + +def show_test_item(item): + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item._nodeid) + used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + + +def pytest_runtest_setup(item): + _update_current_test_var(item, "setup") + item.session._setupstate.prepare(item) + + +def pytest_runtest_call(item): + _update_current_test_var(item, "call") + sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) + try: + item.runtest() + except Exception: + # Store trace info to allow postmortem debugging + type, value, tb = sys.exc_info() + tb = tb.tb_next # Skip *this* frame + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + del type, value, tb # Get rid of these in this frame + raise + + +def pytest_runtest_teardown(item, nextitem): + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var(item, when): + """ + Update PYTEST_CURRENT_TEST to reflect the current item and stage. + + If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. 
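For example, during the ``call`` phase of a test the variable holds a value of the form (node id is illustrative)::

    PYTEST_CURRENT_TEST=test_mod.py::test_foo (call)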
+ """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = "{} ({})".format(item.nodeid, when) + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report): + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + + +# +# Implementation + + +def call_and_report(item, when, log=True, **kwds): + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call, report): + return call.excinfo and not ( + hasattr(report, "wasxfail") + or call.excinfo.errisinstance(skip.Exception) + or call.excinfo.errisinstance(bdb.BdbQuit) + ) + + +def call_runtest_hook(item, when, **kwds): + hookname = "pytest_runtest_" + when + ihook = getattr(item.ihook, hookname) + return CallInfo( + lambda: ihook(item=item, **kwds), + when=when, + treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"), + ) + + +class CallInfo(object): + """ Result/Exception info a function invocation. """ + + #: None or ExceptionInfo object. + excinfo = None + + def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False): + #: context of invocation: one of "setup", "call", + #: "teardown", "memocollect" + self.when = when + self.start = time() + try: + self.result = func() + except KeyboardInterrupt: + if treat_keyboard_interrupt_as_exception: + self.excinfo = ExceptionInfo() + else: + self.stop = time() + raise + except: # noqa + self.excinfo = ExceptionInfo() + self.stop = time() + + def __repr__(self): + if self.excinfo: + status = "exception: %s" % str(self.excinfo.value) + else: + status = "result: %r" % (self.result,) + return "" % (self.when, status) + + +def pytest_runtest_makereport(item, call): + when = call.when + duration = call.stop - call.start + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome = "passed" + longrepr = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif excinfo.errisinstance(skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + if call.when == "call": + longrepr = item.repr_failure(excinfo) + else: # exception in setup or teardown + longrepr = item._repr_failure_py( + excinfo, style=item.config.option.tbstyle + ) + for rwhen, key, content in item._report_sections: + sections.append(("Captured %s %s" % (key, rwhen), content)) + return TestReport( + item.nodeid, + item.location, + keywords, + outcome, + longrepr, + when, + sections, + duration, + user_properties=item.user_properties, + ) + + +def pytest_make_collect_report(collector): + call = CallInfo(lambda: list(collector.collect()), "collect") + longrepr = None + if not call.excinfo: + outcome = "passed" + else: + from _pytest import nose + + skip_exceptions = (Skipped,) + nose.get_skip_exceptions() + if call.excinfo.errisinstance(skip_exceptions): + outcome = "skipped" + r = 
collector._repr_failure_py(call.excinfo, "line").reprcrash + longrepr = (str(r.path), r.lineno, r.message) + else: + outcome = "failed" + errorinfo = collector.repr_failure(call.excinfo) + if not hasattr(errorinfo, "toterminal"): + errorinfo = CollectErrorRepr(errorinfo) + longrepr = errorinfo + rep = CollectReport( + collector.nodeid, outcome, longrepr, getattr(call, "result", None) + ) + rep.call = call # see collect_one_node + return rep + + +class SetupState(object): + """ shared state for setting up/tearing down test items or collectors. """ + + def __init__(self): + self.stack = [] + self._finalizers = {} + + def addfinalizer(self, finalizer, colitem): + """ attach a finalizer to the given colitem. + if colitem is None, this will add a finalizer that + is called at the end of teardown_all(). + """ + assert colitem and not isinstance(colitem, tuple) + assert callable(finalizer) + # assert colitem in self.stack # some unit tests don't setup stack :/ + self._finalizers.setdefault(colitem, []).append(finalizer) + + def _pop_and_teardown(self): + colitem = self.stack.pop() + self._teardown_with_finalization(colitem) + + def _callfinalizers(self, colitem): + finalizers = self._finalizers.pop(colitem, None) + exc = None + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. + if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def _teardown_with_finalization(self, colitem): + self._callfinalizers(colitem) + if hasattr(colitem, "teardown"): + colitem.teardown() + for colitem in self._finalizers: + assert ( + colitem is None or colitem in self.stack or isinstance(colitem, tuple) + ) + + def teardown_all(self): + while self.stack: + self._pop_and_teardown() + for key in list(self._finalizers): + self._teardown_with_finalization(key) + assert not self._finalizers + + def teardown_exact(self, item, nextitem): + needed_collectors = nextitem and nextitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors): + exc = None + while self.stack: + if self.stack == needed_collectors[: len(self.stack)]: + break + try: + self._pop_and_teardown() + except TEST_OUTCOME: + # XXX Only first exception will be seen by user, + # ideally all should be reported. 
+ if exc is None: + exc = sys.exc_info() + if exc: + six.reraise(*exc) + + def prepare(self, colitem): + """ setup objects along the collector chain to the test-method + and teardown previously setup objects.""" + needed_collectors = colitem.listchain() + self._teardown_towards(needed_collectors) + + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, "_prepare_exc"): + six.reraise(*col._prepare_exc) + for col in needed_collectors[len(self.stack) :]: + self.stack.append(col) + try: + col.setup() + except TEST_OUTCOME: + col._prepare_exc = sys.exc_info() + raise + + +def collect_one_node(collector): + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2df3a23d6c289fbdcf4bd61120fbeaa3de2434c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/runner.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd4ad6d88f7b8f8ace0d66ae3066d7e6f071876 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="only setup fixtures, do not execute tests.", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="show setup of fixtures while executing tests.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup(fixturedef, request): + yield + config = request.config + if config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + fixturedef.cached_param = fixturedef.ids(request.param) + else: + fixturedef.cached_param = fixturedef.ids[request.param_index] + else: + fixturedef.cached_param = request.param + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef): + if hasattr(fixturedef, "cached_result"): + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action(fixturedef, msg): + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + out, err = capman.read_global_capture() + + tw = config.get_terminal_writer() + tw.line() + tw.write(" " * 2 * fixturedef.scopenum) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write("[{}]".format(fixturedef.cached_param)) + + if capman: + capman.resume_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setuponly: + config.option.setupshow = True diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f71e6994beb82042067674d9aa1472283b4f6d22 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setuponly.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000000000000000000000000000000000000..351e0be650fa3c0f4f0db23406f24d2b92ba24df --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.py @@ -0,0 +1,31 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pytest + + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="show what fixtures and tests would be executed but " + "don't execute anything.", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup(fixturedef, request): + # Will return a dummy fixture if the setuponly option is provided. 
+ if request.config.option.setupplan: + fixturedef.cached_result = (None, None, None) + return fixturedef.cached_result + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ddf3c9f8cefec135cb6ee3e41d846c04c0d841 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/setupplan.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.py new file mode 100644 index 0000000000000000000000000000000000000000..f755fc4eb8009c1695a6b619f91c0f026f2028c4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.py @@ -0,0 +1,298 @@ +""" support for skip/xfail functions and markers. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from _pytest.config import hookimpl +from _pytest.mark.evaluate import MarkEvaluator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="run tests even if they are marked xfail", + ) + + parser.addini( + "xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config): + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config._cleanup.append(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "https://docs.pytest.org/en/latest/skipping.html", + ) + config.addinivalue_line( + "markers", + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", + ) + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item): + # Check if skip or skipif are specified as pytest marks + item._skipped_by_mark = False + eval_skipif = MarkEvaluator(item, "skipif") + if eval_skipif.istrue(): + item._skipped_by_mark = True + skip(eval_skipif.getexplanation()) + + for skip_info in item.iter_markers(name="skip"): + item._skipped_by_mark = True + if "reason" in skip_info.kwargs: + skip(skip_info.kwargs["reason"]) + elif skip_info.args: + skip(skip_info.args[0]) + else: + skip("unconditional skip") + + item._evalxfail = MarkEvaluator(item, "xfail") + check_xfail_no_run(item) + + +@hookimpl(hookwrapper=True) +def pytest_pyfunc_call(pyfuncitem): + check_xfail_no_run(pyfuncitem) + outcome = yield + passed = outcome.excinfo is None + if passed: + check_strict_xfail(pyfuncitem) + + +def check_xfail_no_run(item): + """check xfail(run=False)""" + if not item.config.option.runxfail: + evalxfail = item._evalxfail + if evalxfail.istrue(): + if not evalxfail.get("run", True): + xfail("[NOTRUN] " + evalxfail.getexplanation()) + + +def check_strict_xfail(pyfuncitem): + """check xfail(strict=True) for the given PASSING test""" + evalxfail = pyfuncitem._evalxfail + if evalxfail.istrue(): + strict_default = pyfuncitem.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) + if is_strict_xfail: + del pyfuncitem._evalxfail + explanation = evalxfail.getexplanation() + fail("[XPASS(strict)] " + explanation, pytrace=False) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + outcome = yield + rep = outcome.get_result() + evalxfail = getattr(item, "_evalxfail", None) + # unitttest special case, see setting of _unexpectedsuccess + if hasattr(item, "_unexpectedsuccess") and rep.when == "call": + from _pytest.compat import _is_unittest_unexpected_success_a_failure + + if item._unexpectedsuccess: + rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) + else: + rep.longrepr = "Unexpected success" + if _is_unittest_unexpected_success_a_failure(): + rep.outcome = "failed" + else: + rep.outcome = "passed" + rep.wasxfail = rep.longrepr + elif item.config.option.runxfail: + pass # don't interefere + elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): + if call.excinfo: + if evalxfail.invalidraise(call.excinfo.value): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = evalxfail.getexplanation() + elif call.when == "call": + strict_default = item.config.getini("xfail_strict") + is_strict_xfail = evalxfail.get("strict", strict_default) + explanation = evalxfail.getexplanation() + if is_strict_xfail: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] {}".format(explanation) + else: + rep.outcome = "passed" + rep.wasxfail = explanation + elif ( + getattr(item, "_skipped_by_mark", False) + and rep.skipped + and type(rep.longrepr) is tuple + ): + # skipped by mark.skipif; change the location of the failure + # to point to the item definition, otherwise it will display + # the location of where the skip exception was raised within pytest + filename, line, reason = rep.longrepr + filename, line = item.location[:2] + rep.longrepr = filename, line, reason + + +# called by terminalreporter progress reporting + + +def pytest_report_teststatus(report): + if 
hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "xfail" + elif report.passed: + return "xpassed", "X", ("XPASS", {"yellow": True}) + + +# called by the terminalreporter instance/plugin + + +def pytest_terminal_summary(terminalreporter): + tr = terminalreporter + if not tr.reportchars: + # for name in "xfailed skipped failed xpassed": + # if not tr.stats.get(name, 0): + # tr.write_line("HINT: use '-r' option to see extra " + # "summary info about tests") + # break + return + + lines = [] + for char in tr.reportchars: + action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) + action(terminalreporter, lines) + + if lines: + tr._tw.sep("=", "short test summary info") + for line in lines: + tr._tw.line(line) + + +def show_simple(terminalreporter, lines, stat, format): + failed = terminalreporter.stats.get(stat) + if failed: + for rep in failed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + lines.append(format % (pos,)) + + +def show_xfailed(terminalreporter, lines): + xfailed = terminalreporter.stats.get("xfailed") + if xfailed: + for rep in xfailed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XFAIL %s" % (pos,)) + if reason: + lines.append(" " + str(reason)) + + +def show_xpassed(terminalreporter, lines): + xpassed = terminalreporter.stats.get("xpassed") + if xpassed: + for rep in xpassed: + pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) + reason = rep.wasxfail + lines.append("XPASS %s %s" % (pos, reason)) + + +def folded_skips(skipped): + d = {} + for event in skipped: + key = event.longrepr + assert len(key) == 3, (event, key) + keywords = getattr(event, "keywords", {}) + # folding reports with global pytestmark variable + # this is workaround, because for now we cannot identify the scope of a skip marker + # TODO: revisit after marks scope would be fixed + when = getattr(event, "when", None) + if when == "setup" and "skip" in keywords and "pytestmark" not in keywords: + key = (key[0], None, key[2]) + d.setdefault(key, []).append(event) + values = [] + for key, events in d.items(): + values.append((len(events),) + key) + return values + + +def show_skipped(terminalreporter, lines): + tr = terminalreporter + skipped = tr.stats.get("skipped", []) + if skipped: + # if not tr.hasopt('skipped'): + # tr.write_line( + # "%d skipped tests, specify -rs for more info" % + # len(skipped)) + # return + fskips = folded_skips(skipped) + if fskips: + # tr.write_sep("_", "skipped test summary") + for num, fspath, lineno, reason in fskips: + if reason.startswith("Skipped: "): + reason = reason[9:] + if lineno is not None: + lines.append( + "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason) + ) + else: + lines.append("SKIP [%d] %s: %s" % (num, fspath, reason)) + + +def shower(stat, format): + def show_(terminalreporter, lines): + return show_simple(terminalreporter, lines, stat, format) + + return show_ + + +REPORTCHAR_ACTIONS = { + "x": show_xfailed, + "X": show_xpassed, + "f": shower("failed", "FAIL %s"), + "F": shower("failed", "FAIL %s"), + "s": show_skipped, + "S": show_skipped, + "p": shower("passed", "PASSED %s"), + "E": shower("error", "ERROR %s"), +} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f9a2a71dd4608367b5d93e6cf8fa3f117da382d Binary files /dev/null and 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/skipping.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.py new file mode 100644 index 0000000000000000000000000000000000000000..0d5a08185abbdbb147073515a4709fe4375a1e26 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.py @@ -0,0 +1,879 @@ +""" terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import itertools +import platform +import sys +import time + +import attr +import pluggy +import py +import six +from more_itertools import collapse + +import pytest +from _pytest import nodes +from _pytest.main import EXIT_INTERRUPTED +from _pytest.main import EXIT_NOTESTSCOLLECTED +from _pytest.main import EXIT_OK +from _pytest.main import EXIT_TESTSFAILED +from _pytest.main import EXIT_USAGEERROR + + +class MoreQuietAction(argparse.Action): + """ + a modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time + + used to unify verbosity handling + """ + + def __init__(self, option_strings, dest, default=None, required=False, help=None): + super(MoreQuietAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="increase verbosity.", + ), + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="decrease verbosity.", + ), + group._addoption( + "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default="", + metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. " + "Warnings are displayed at all times except when " + "--disable-warnings is set", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="show locals in tracebacks (disabled by default).", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="traceback print mode (auto/long/short/line/native/no).", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. 
" + "Default is 'all'.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="don't cut any tracebacks (default is to cut).", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="color terminal output (yes/no/auto).", + ) + + parser.addini( + "console_output_style", + help="console output: classic or with additional progress information (classic|progress).", + default="progress", + ) + + +def pytest_configure(config): + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config): + reportopts = "" + reportchars = config.option.reportchars + if not config.option.disable_warnings and "w" not in reportchars: + reportchars += "w" + elif config.option.disable_warnings and "w" in reportchars: + reportchars = reportchars.replace("w", "") + if reportchars: + for char in reportchars: + if char not in reportopts and char != "a": + reportopts += char + elif char == "a": + reportopts = "fEsxXw" + return reportopts + + +def pytest_report_teststatus(report): + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + elif report.failed: + letter = "F" + if report.when != "call": + letter = "f" + return report.outcome, letter, report.outcome.upper() + + +@attr.s +class WarningReport(object): + """ + Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``. + + :ivar str message: user friendly message about the warning + :ivar str|None nodeid: node id that generated the warning (see ``get_location``). + :ivar tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + + :ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook. + """ + + message = attr.ib() + nodeid = attr.ib(default=None) + fslocation = attr.ib(default=None) + legacy = attr.ib(default=False) + + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. 
+ """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + if not relpath: + relpath = str(filename) + return "%s:%s" % (relpath, linenum) + else: + return str(self.fslocation) + return None + + +class TerminalReporter(object): + def __init__(self, config, file=None): + import _pytest.config + + self.config = config + self.verbosity = self.config.option.verbose + self.showheader = self.verbosity >= 0 + self.showfspath = self.verbosity >= 0 + self.showlongtestinfo = self.verbosity > 0 + self._numcollected = 0 + self._session = None + + self.stats = {} + self.startdir = py.path.local() + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + # self.writer will be deprecated in pytest-3.4 + self.writer = self._tw + self._screen_width = self._tw.fullwidth + self.currentfspath = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported = set() + self._show_progress_info = self._determine_show_progress_info() + + def _determine_show_progress_info(self): + """Return True if we should display progress information based on the current config""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption("capture") == "no": + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow"): + return False + return self.config.getini("console_output_style") in ("progress", "count") + + def hasopt(self, char): + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid, res): + fspath = self.config.rootdir.join(nodeid.split("::")[0]) + if fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + self.currentfspath = fspath + fspath = self.startdir.bestrelpath(fspath) + self._tw.line() + self._tw.write(fspath + " ") + self._tw.write(res) + + def write_ensure_prefix(self, prefix, extra="", **kwargs): + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self): + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def write(self, content, **markup): + self._tw.write(content, **markup) + + def write_line(self, line, **markup): + if not isinstance(line, six.text_type): + line = six.text_type(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line, **markup): + """ + Rewinds the terminal cursor to the beginning and writes the given line. + + :kwarg erase: if True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. 
+ """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep(self, sep, title=None, **markup): + self.ensure_newline() + self._tw.sep(sep, title, **markup) + + def section(self, title, sep="=", **kw): + self._tw.sep(sep, title, **kw) + + def line(self, msg, **kw): + self._tw.line(msg, **kw) + + def pytest_internalerror(self, excrepr): + for line in six.text_type(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return 1 + + def pytest_logwarning(self, fslocation, message, nodeid): + warnings = self.stats.setdefault("warnings", []) + warning = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid, legacy=True + ) + warnings.append(warning) + + def pytest_warning_captured(self, warning_message, item): + # from _pytest.nodes import get_fslocation_from_item + from _pytest.warnings import warning_record_to_str + + warnings = self.stats.setdefault("warnings", []) + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + nodeid = item.nodeid if item is not None else "" + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + warnings.append(warning_report) + + def pytest_plugin_registered(self, plugin): + if self.config.option.traceconfig: + msg = "PLUGIN registered: %s" % (plugin,) + # XXX this event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line + self.write_line(msg) + + def pytest_deselected(self, items): + self.stats.setdefault("deselected", []).extend(items) + + def pytest_runtest_logstart(self, nodeid, location): + # ensure that the path is printed before the + # 1st test of a module starts running + if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + elif self.showfspath: + fsid = nodeid.split("::")[0] + self.write_fspath_result(fsid, "") + + def pytest_runtest_logreport(self, report): + rep = report + res = self.config.hook.pytest_report_teststatus(report=rep) + category, letter, word = res + if isinstance(word, tuple): + word, markup = word + else: + markup = None + self.stats.setdefault(category, []).append(rep) + self._tests_ran = True + if not letter and not word: + # probably passed setup/teardown + return + running_xdist = hasattr(rep, "node") + if self.verbosity <= 0: + if not running_xdist and self.showfspath: + self.write_fspath_result(rep.nodeid, letter) + else: + self._tw.write(letter) + else: + self._progress_nodeids_reported.add(rep.nodeid) + if markup is None: + if rep.passed: + markup = {"green": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + + def pytest_runtest_logfinish(self, nodeid): + if self.config.getini("console_output_style") 
== "count": + num_tests = self._session.testscollected + progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) + else: + progress_length = len(" [100%]") + + if self.verbosity <= 0 and self._show_progress_info: + self._progress_nodeids_reported.add(nodeid) + last_item = ( + len(self._progress_nodeids_reported) == self._session.testscollected + ) + if last_item: + self._write_progress_information_filling_space() + else: + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", cyan=True) + + def _get_progress_information_message(self): + if self.config.getoption("capture") == "no": + return "" + collected = self._session.testscollected + if self.config.getini("console_output_style") == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = "{{:{}d}}".format(len(str(collected))) + format_string = " [{}/{{}}]".format(counter_format) + return format_string.format(len(progress), collected) + return " [ {} / {} ]".format(collected, collected) + else: + if collected: + progress = len(self._progress_nodeids_reported) * 100 // collected + return " [{:3d}%]".format(progress) + return " [100%]" + + def _write_progress_information_filling_space(self): + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), cyan=True) + + @property + def _width_of_current_line(self): + """Return the width of current line, using the superior implementation of py-1.6 when available""" + try: + return self._tw.width_of_current_line + except AttributeError: + # py < 1.6.0 + return self._tw.chars_on_current_line + + def pytest_collection(self): + if not self.isatty and self.config.option.verbose >= 1: + self.write("collecting ... 
", bold=True) + + def pytest_collectreport(self, report): + if report.failed: + self.stats.setdefault("error", []).append(report) + elif report.skipped: + self.stats.setdefault("skipped", []).append(report) + items = [x for x in report.result if isinstance(x, pytest.Item)] + self._numcollected += len(items) + if self.isatty: + # self.write_fspath_result(report.nodeid, 'E') + self.report_collect() + + def report_collect(self, final=False): + if self.config.option.verbose < 0: + return + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + if final: + line = "collected " + else: + line = "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d errors" % errors + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @pytest.hookimpl(trylast=True) + def pytest_collection_modifyitems(self): + self.report_collect(True) + + @pytest.hookimpl(trylast=True) + def pytest_sessionstart(self, session): + self._session = session + self._sessionstarttime = time.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + msg = "platform %s -- Python %s" % (sys.platform, verinfo) + if hasattr(sys, "pypy_version_info"): + verinfo = ".".join(map(str, sys.pypy_version_info[:3])) + msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) + msg += ", pytest-%s, py-%s, pluggy-%s" % ( + pytest.__version__, + py.__version__, + pluggy.__version__, + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, startdir=self.startdir + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks(self, lines): + lines.reverse() + for line in collapse(lines): + self.write_line(line) + + def pytest_report_header(self, config): + inifile = "" + if config.inifile: + inifile = " " + config.rootdir.bestrelpath(config.inifile) + lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)] + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + + lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return lines + + def pytest_collection_finish(self, session): + if self.config.option.collectonly: + self._printcollecteditems(session.items) + if self.stats.get("failed"): + self._tw.sep("!", "collection failures") + for rep in self.stats.get("failed"): + rep.toterminal(self._tw) + return 1 + return 0 + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items + ) + self._write_report_lines_from_hooks(lines) + + def _printcollecteditems(self, items): + # to print out items and their parent collectors + # we take care to leave out Instances aka () + # because later versions are going to get rid of them anyway + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = {} + for item in items: + name = item.nodeid.split("::", 1)[0] + counts[name] = counts.get(name, 0) + 1 + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + 
else: + for item in items: + nodeid = item.nodeid + nodeid = nodeid.replace("::()::", "::") + self._tw.line(nodeid) + return + stack = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + # if col.name == "()": + # continue + indent = (len(stack) - 1) * " " + self._tw.line("%s%s" % (indent, col)) + + @pytest.hookimpl(hookwrapper=True) + def pytest_sessionfinish(self, exitstatus): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + EXIT_OK, + EXIT_TESTSFAILED, + EXIT_INTERRUPTED, + EXIT_USAGEERROR, + EXIT_NOTESTSCOLLECTED, + ) + if exitstatus in summary_exit_codes: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus + ) + if exitstatus == EXIT_INTERRUPTED: + self._report_keyboardinterrupt() + del self._keyboardinterrupt_memo + self.summary_stats() + + @pytest.hookimpl(hookwrapper=True) + def pytest_terminal_summary(self): + self.summary_errors() + self.summary_failures() + yield + self.summary_warnings() + self.summary_passes() + + def pytest_keyboard_interrupt(self, excinfo): + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self): + if hasattr(self, "_keyboardinterrupt_memo"): + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self): + excrepr = self._keyboardinterrupt_memo + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --fulltrace)", + yellow=True, + ) + + def _locationline(self, nodeid, fspath, lineno, domain): + def mkrel(nodeid): + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path + + if fspath: + res = mkrel(nodeid).replace("::()", "") # parens-normalization + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + self.startdir.bestrelpath(fspath) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + if hasattr(rep, "location"): + fspath, lineno, domain = rep.location + return domain + else: + return "test session" # XXX? 
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # summaries for sessionfinish + # + def getreports(self, name): + values = [] + for x in self.stats.get(name, []): + if not hasattr(x, "_pdbshown"): + values.append(x) + return values + + def summary_warnings(self): + if self.hasopt("w"): + all_warnings = self.stats.get("warnings") + if not all_warnings: + return + + grouped = itertools.groupby( + all_warnings, key=lambda wr: wr.get_location(self.config) + ) + + self.write_sep("=", "warnings summary", yellow=True, bold=False) + for location, warning_records in grouped: + # legacy warnings show their location explicitly, while standard warnings look better without + # it because the location is already formatted into the message + warning_records = list(warning_records) + if location: + self._tw.line(str(location)) + for w in warning_records: + if location: + lines = w.message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = w.message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html") + + def summary_passes(self): + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + self._outrep_summary(rep) + + def print_teardown_sections(self, rep): + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + for rep in reports: + if self.config.option.tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + markup = {"red": True, "bold": True} + self.write_sep("_", msg, **markup) + self._outrep_summary(rep) + for report in self.getreports(""): + if report.nodeid == rep.nodeid and report.when == "teardown": + self.print_teardown_sections(report) + + def summary_errors(self): + if self.config.option.tbstyle != "no": + reports = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if not hasattr(rep, "when"): + # collect + msg = "ERROR collecting " + msg + elif rep.when == "setup": + msg = "ERROR at setup of " + msg + elif rep.when == "teardown": + msg = "ERROR at teardown of " + msg + self.write_sep("_", msg) + self._outrep_summary(rep) + + def _outrep_summary(self, rep): + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self): + session_duration = time.time() - self._sessionstarttime + (line, color) = 
build_summary_stats_line(self.stats) + msg = "%s in %.2f seconds" % (line, session_duration) + markup = {color: True, "bold": True} + + if self.verbosity >= 0: + self.write_sep("=", msg, **markup) + if self.verbosity == -1: + self.write_line(msg, **markup) + + +def repr_pythonversion(v=None): + if v is None: + v = sys.version_info + try: + return "%s.%s.%s-%s-%s" % v + except (TypeError, ValueError): + return str(v) + + +def build_summary_stats_line(stats): + keys = ("failed passed skipped deselected xfailed xpassed warnings error").split() + unknown_key_seen = False + for key in stats.keys(): + if key not in keys: + if key: # setup/teardown reports have an empty key, ignore them + keys.append(key) + unknown_key_seen = True + parts = [] + for key in keys: + val = stats.get(key, None) + if val: + parts.append("%d %s" % (len(val), key)) + + if parts: + line = ", ".join(parts) + else: + line = "no tests ran" + + if "failed" in stats or "error" in stats: + color = "red" + elif "warnings" in stats or unknown_key_seen: + color = "yellow" + elif "passed" in stats: + color = "green" + else: + color = "yellow" + + return (line, color) + + +def _plugin_nameversions(plugininfo): + values = [] + for plugin, dist in plugininfo: + # gets us name and version! + name = "{dist.project_name}-{dist.version}".format(dist=dist) + # questionable convenience, but it keeps things short + if name.startswith("pytest-"): + name = name[7:] + # we decided to print python package names + # they can have more than one plugin + if name not in values: + values.append(name) + return values diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1624f48320bf612da604632baa160668ebddee59 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/terminal.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000000000000000000000000000000000000..6287c17051522175aab1e8bbbf5892592340f70c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.py @@ -0,0 +1,187 @@ +""" support for providing temporary directories to test functions. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import tempfile +import warnings + +import attr +import py + +import pytest +from .pathlib import ensure_reset_dir +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import Path +from _pytest.monkeypatch import MonkeyPatch + + +@attr.s +class TempPathFactory(object): + """Factory for temporary directories under the common base temp directory. 
+ + The base directory can be configured using the ``--basetemp`` option.""" + + _given_basetemp = attr.ib() + _trace = attr.ib() + _basetemp = attr.ib(default=None) + + @classmethod + def from_config(cls, config): + """ + :param config: a pytest configuration + """ + return cls( + given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir") + ) + + def mktemp(self, basename, numbered=True): + """makes a temporary directory managed by the factory""" + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir() + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename) + self._trace("mktemp", p) + return p + + def getbasetemp(self): + """ return base temporary directory. """ + if self._basetemp is None: + if self._given_basetemp is not None: + basetemp = Path(self._given_basetemp) + ensure_reset_dir(basetemp) + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()) + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath("pytest-of-{}".format(user)) + rootdir.mkdir(exist_ok=True) + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT + ) + assert basetemp is not None + self._basetemp = t = basetemp + self._trace("new basetemp", t) + return t + else: + return self._basetemp + + +@attr.s +class TempdirFactory(object): + """ + backward comptibility wrapper that implements + :class:``py.path.local`` for :class:``TempPathFactory`` + """ + + _tmppath_factory = attr.ib() + + def ensuretemp(self, string, dir=1): + """ (deprecated) return temporary directory path with + the given string as the trailing part. It is usually + better to use the 'tmpdir' function argument which + provides an empty unique-per-test-invocation directory + and is guaranteed to be empty. + """ + # py.log._apiwarn(">1.1", "use tmpdir function argument") + from .deprecated import PYTEST_ENSURETEMP + + warnings.warn(PYTEST_ENSURETEMP, stacklevel=2) + return self.getbasetemp().ensure(string, dir=dir) + + def mktemp(self, basename, numbered=True): + """Create a subdirectory of the base temporary directory and return it. + If ``numbered``, ensure the directory is unique by adding a number + prefix greater than any existing one. + """ + return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self): + """backward compat wrapper for ``_tmppath_factory.getbasetemp``""" + return py.path.local(self._tmppath_factory.getbasetemp().resolve()) + + +def get_user(): + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010). + """ + import getpass + + try: + return getpass.getuser() + except (ImportError, KeyError): + return None + + +def pytest_configure(config): + """Create a TempdirFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmpdir_factory session fixture. 
+ """ + mp = MonkeyPatch() + tmppath_handler = TempPathFactory.from_config(config) + t = TempdirFactory(tmppath_handler) + config._cleanup.append(mp.undo) + mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) + mp.setattr(config, "_tmpdirhandler", t, raising=False) + mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) + + +@pytest.fixture(scope="session") +def tmpdir_factory(request): + """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. + """ + return request.config._tmpdirhandler + + +@pytest.fixture(scope="session") +def tmp_path_factory(request): + """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. + """ + return request.config._tmp_path_factory + + +def _mk_tmp(request, factory): + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@pytest.fixture +def tmpdir(request, tmpdir_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a `py.path.local`_ + path object. + + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html + """ + return _mk_tmp(request, tmpdir_factory) + + +@pytest.fixture +def tmp_path(request, tmp_path_factory): + """Return a temporary directory path object + which is unique to each test function invocation, + created as a sub directory of the base temporary + directory. The returned object is a :class:`pathlib.Path` + object. + + .. note:: + + in python < 3.6 this is a pathlib2.Path + """ + + return _mk_tmp(request, tmp_path_factory) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0e6e77fc8a512a7c5588d58c7dabe2cc6df033f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/tmpdir.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.py new file mode 100644 index 0000000000000000000000000000000000000000..a38a60d8e68303040eb22c51b684dd668e2c6437 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.py @@ -0,0 +1,259 @@ +""" discovery and running of std-library "unittest" style tests. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import traceback + +import _pytest._code +from _pytest.compat import getimfunc +from _pytest.config import hookimpl +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.python import transfer_markers + + +def pytest_pycollect_makeitem(collector, name, obj): + # has unittest been imported and is obj a subclass of its TestCase? 
+ try: + if not issubclass(obj, sys.modules["unittest"].TestCase): + return + except Exception: + return + # yes, so let's collect it + return UnitTestCase(name, parent=collector) + + +class UnitTestCase(Class): + # marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs + nofuncargs = True + + def setup(self): + cls = self.obj + if getattr(cls, "__unittest_skip__", False): + return # skipped + setup = getattr(cls, "setUpClass", None) + if setup is not None: + setup() + teardown = getattr(cls, "tearDownClass", None) + if teardown is not None: + self.addfinalizer(teardown) + super(UnitTestCase, self).setup() + + def collect(self): + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + module = self.getparent(Module).obj + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + transfer_markers(funcobj, cls, module) + yield TestCaseFunction(name, parent=self, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction("runTest", parent=self) + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo = None + _testcase = None + + def setup(self): + self._testcase = self.parent.obj(self.name) + self._fix_unittest_skip_decorator() + self._obj = getattr(self._testcase, self.name) + if hasattr(self._testcase, "setup_method"): + self._testcase.setup_method(self._obj) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def _fix_unittest_skip_decorator(self): + """ + The @unittest.skip decorator calls functools.wraps(self._testcase) + The call to functools.wraps() fails unless self._testcase + has a __name__ attribute. This is usually automatically supplied + if the test is a function or method, but we need to add manually + here. + + See issue #1169 + """ + if sys.version_info[0] == 2: + setattr(self._testcase, "__name__", self.name) + + def teardown(self): + if hasattr(self._testcase, "teardown_method"): + self._testcase.teardown_method(self._obj) + # Allow garbage collection on TestCase instance attributes. 
+ self._testcase = None + self._obj = None + + def startTest(self, testcase): + pass + + def _addexcinfo(self, rawexcinfo): + # unwrap potential exception info (see twisted trial support below) + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo(rawexcinfo) + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except: # noqa + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addFailure(self, testcase, rawexcinfo): + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase, reason): + try: + skip(reason) + except skip.Exception: + self._skipped_by_mark = True + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure(self, testcase, rawexcinfo, reason=""): + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess(self, testcase, reason=""): + self._unexpectedsuccess = reason + + def addSuccess(self, testcase): + pass + + def stopTest(self, testcase): + pass + + def _handle_skip(self): + # implements the skipping machinery (see #2137) + # analog to pythons Lib/unittest/case.py:run + testMethod = getattr(self._testcase, self._testcase._testMethodName) + if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr( + testMethod, "__unittest_skip__", False + ): + # If the class or method was skipped. 
+ skip_why = getattr( + self._testcase.__class__, "__unittest_skip_why__", "" + ) or getattr(testMethod, "__unittest_skip_why__", "") + try: # PY3, unittest2 on PY2 + self._testcase._addSkip(self, self._testcase, skip_why) + except TypeError: # PY2 + if sys.version_info[0] != 2: + raise + self._testcase._addSkip(self, skip_why) + return True + return False + + def runtest(self): + if self.config.pluginmanager.get_plugin("pdbinvoke") is None: + self._testcase(result=self) + else: + # disables tearDown and cleanups for post mortem debugging (see #1890) + if self._handle_skip(): + return + self._testcase.debug() + + def _prunetraceback(self, excinfo): + Function._prunetraceback(self, excinfo) + traceback = excinfo.traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest") + ) + if traceback: + excinfo.traceback = traceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item, call): + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + +# twisted trial support + + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done=[]): + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b315de09740d22d9cbd9f453a3b4612a2c285f0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/unittest.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000000000000000000000000000000000000..55e1f037ae59c504c79aab399ca4828a6751de1f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.py @@ -0,0 +1,60 @@ +import attr + + +class PytestWarning(UserWarning): + """ + Bases: :class:`UserWarning`. + + Base class for all warnings emitted by pytest. + """ + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. + + Warning class for features that will be removed in a future version. + """ + + +class RemovedInPytest4Warning(PytestDeprecationWarning): + """ + Bases: :class:`pytest.PytestDeprecationWarning`. + + Warning class for features scheduled to be removed in pytest 4.0. 
+ """ + + +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """ + Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. + + Warning category used to denote experiments in pytest. Use sparingly as the API might change or even be + removed completely in future version + """ + + @classmethod + def simple(cls, apiname): + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@attr.s +class UnformattedWarning(object): + """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. + + Using this class avoids to keep all the warning types and messages in this module, avoiding misuse. + """ + + category = attr.ib() + template = attr.ib() + + def format(self, **kwargs): + """Returns an instance of the warning category, formatted with given kwargs""" + return self.category(self.template.format(**kwargs)) + + +PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35c195bfe05e70fdc572a3e023fd8f8cd160aa2e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warning_types.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..52afbe784ddd0bc3ee1b1dc3017e95a03afefff8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.py @@ -0,0 +1,173 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import warnings +from contextlib import contextmanager + +import pytest +from _pytest import compat + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. + """ + parts = arg.split(":") + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append("") + action, message, category, module, lineno = [s.strip() for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="set which warnings to report, see -W option of python itself.", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.", + ) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. " + "see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item(config, ihook, when, item): + """ + Context manager that catches warnings generated in the contained execution block. 
+ + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_captured`` hook. + """ + cmdline_filters = config.getoption("pythonwarnings") or [] + inifilters = config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + + if not sys.warnoptions: + # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908) + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + # filters should have this precedence: mark, cmdline options, ini + # filters should be applied in the inverse order of precedence + for arg in inifilters: + _setoption(warnings, arg) + + for arg in cmdline_filters: + warnings._setoption(arg) + + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + _setoption(warnings, arg) + + yield + + for warning_message in log: + ihook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=warning_message, when=when, item=item) + ) + + +def warning_record_to_str(warning_message): + """Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2. + + When Python 2 support is dropped this function can be greatly simplified. + """ + warn_msg = warning_message.message + unicode_warning = False + if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + new_args = [] + for m in warn_msg.args: + new_args.append( + compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m + ) + unicode_warning = list(warn_msg.args) != new_args + warn_msg.args = new_args + + msg = warnings.formatwarning( + warn_msg, + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if unicode_warning: + warnings.warn( + "Warning is using unicode non convertible to ascii, " + "converting to a safe representation:\n {!r}".format(compat.safe_str(msg)), + UnicodeWarning, + ) + return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session): + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary(terminalreporter): + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +def _issue_config_warning(warning, config): + """ + This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: + at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured + hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. + + :param warning: the warning instance. 
+ :param config: + """ + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + warnings.warn(warning, stacklevel=2) + config.hook.pytest_warning_captured.call_historic( + kwargs=dict(warning_message=records[0], when="config", item=None) + ) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11ead5e3c587891af232bf79ba0bee52e832161b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_pytest/warnings.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/_scandir.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_scandir.so new file mode 100755 index 0000000000000000000000000000000000000000..ff9ab701b848b1d43578293667448d81b5c8c5e5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/_scandir.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/INSTALLER b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/LICENSE.rst b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..1110225936f4e04989a86a0dd1c6a484352ec8e0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/LICENSE.rst @@ -0,0 +1,26 @@ +Copyright (c) 2011-2017, Astropy Developers + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the Astropy Team nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/METADATA b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8b078d95cc07787e13b116acddaf6182066c3bb6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/METADATA @@ -0,0 +1,32 @@ +Metadata-Version: 2.1 +Name: astropy +Version: 2.0.9 +Summary: Community-developed python astronomy tools +Home-page: http://astropy.org +Author: The Astropy Developers +Author-email: astropy.team@gmail.com +License: BSD +Keywords: astronomy,astrophysics,cosmology,space,science,units,table,wcs,vo,samp,coordinate,fits,modeling,models,fitting,ascii +Platform: UNKNOWN +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: C +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Scientific/Engineering :: Astronomy +Classifier: Topic :: Scientific/Engineering :: Physics +Requires: numpy +Provides: astropy +Requires-Python: >=2.7 +Requires-Dist: pytest (>=2.8) +Requires-Dist: numpy (>=1.9.0) + +Astropy is a package intended to contain core functionality and some +common tools needed for performing astronomy and astrophysics research with +Python. It also provides an index for other astronomy packages and tools for +managing them. + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/RECORD b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..47060a1634f2359a17b1fb8cfa7a3b99c5e54f3b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/RECORD @@ -0,0 +1,1660 @@ +../../../bin/fits2bitmap,sha256=az4wkKXM0lPHRdiXzsXPz2QB3JA65B4dH0SGw5Zatik,310 +../../../bin/fitscheck,sha256=0r8IvjsWVL7T3KY8bDe1x-YWBhiiR_Xw8ox6MXR6pL0,302 +../../../bin/fitsdiff,sha256=iwb0WrJf0oLGzHErsf7NNmuB8F_Pp1f_qdMLa9b_d8k,301 +../../../bin/fitsheader,sha256=FcXKP1GV8h5Z4kQmP45uQzg3fG9EwzJ3tF0tWTJ55k0,303 +../../../bin/fitsinfo,sha256=nL7X2k6I_TbKEd1MnBTgrE5ER8HvzKBJ4bc-iXpeI6I,301 +../../../bin/samp_hub,sha256=zTQZNkR2eWey8hG3S4M4lPT4kPaq0qQ72SKa8n9kcig,304 +../../../bin/volint,sha256=qFB9vup_c3ITAAOq986uv8BHD8UH24bkZfNWCdltpDo,294 +../../../bin/wcslint,sha256=L2X2SHhM2Jkm75kdRivLvfEkyAGML2dl3losQyqMFOM,288 +astropy-2.0.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +astropy-2.0.9.dist-info/LICENSE.rst,sha256=A3EvQ_7eUwFVty49RJe-3PasHE2Bufbk2V4Idv7JEto,1496 +astropy-2.0.9.dist-info/METADATA,sha256=QWBNb-rXclIWnETHylpATLu_S0ASQEfWehPYWraLCRQ,1228 +astropy-2.0.9.dist-info/RECORD,, +astropy-2.0.9.dist-info/WHEEL,sha256=-Crjs1WwpTj5CCeFg4GKXWPpZsiCLs9UbQGH1WBfXpw,105 +astropy-2.0.9.dist-info/entry_points.txt,sha256=LmQsCnhaRayP-5K7eXTyPu7Mh6KC5CLw9EF7D4ifI7E,403 +astropy-2.0.9.dist-info/top_level.txt,sha256=1x1TQBA_cdZt34TYpofZyvtgiaMGVAY3uBS-2PBXGTs,8 +astropy/CITATION,sha256=6LBe9CgGLu3NDUx4uSW7WoKgZDUfCupqBXSeHCXsLcI,5963 +astropy/__init__.py,sha256=QLR9C9JOGE5jFHFfN5abEl4qY0qS6j8x4hm6M-LrvOg,10974 +astropy/__init__.pyc,, 
+astropy/_compiler.so,sha256=m08TOKqOrg8BxaFudtPKacCzrdTKB9ZgDat4Heo0tOM,23584 +astropy/_erfa/__init__.py,sha256=jR5R__cXBAUejdVXEXj2-InlgUpG4n5d5dGBpTPati8,222 +astropy/_erfa/__init__.pyc,, +astropy/_erfa/_core.so,sha256=25NfYKmwO6PZ1eI2fuAP6YF6GXaVlxE8vzm1Htz94jE,1065168 +astropy/_erfa/core.py,sha256=ejh_IiQvuCpjsEbIy4uNRz-jeELSwwt37xy3IvBmbcM,857075 +astropy/_erfa/core.pyc,, +astropy/_erfa/erfa_generator.py,sha256=OkZ5KXw3GGx3r30hG755wqDmuafIoZuDxkc-4pdcCmI,20509 +astropy/_erfa/erfa_generator.pyc,, +astropy/_erfa/setup_package.py,sha256=QMep_kEix5AAqU_rhbjXHiUU2OQo6iGfapp3M7RZT58,3850 +astropy/_erfa/setup_package.pyc,, +astropy/_erfa/tests/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/_erfa/tests/__init__.pyc,, +astropy/_erfa/tests/test_erfa.py,sha256=zlBF5sdugjHo3ht9A22yR0KYk2d-Wb32hHVY1oKBsHM,7986 +astropy/_erfa/tests/test_erfa.pyc,, +astropy/analytic_functions/__init__.py,sha256=OTJ8mxbG0LJiB_BcNAy8HTFPb1iWa6YPW6hB_ebBV1g,350 +astropy/analytic_functions/__init__.pyc,, +astropy/analytic_functions/blackbody.py,sha256=GpbjEJ_I2tTjUsniKWWixol4jgMgZZJlL_4FV2WjmHI,2225 +astropy/analytic_functions/blackbody.pyc,, +astropy/analytic_functions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/analytic_functions/tests/__init__.pyc,, +astropy/analytic_functions/tests/test_blackbody.py,sha256=ESOemnPYbgmwXCCkO9zOYk3RKFILHRk-e_xUtP6-kdY,722 +astropy/analytic_functions/tests/test_blackbody.pyc,, +astropy/astropy.cfg,sha256=Y42DfXKKEDIUjI88iRZu4Y6OookS27wFuYqqOk_aG2I,4688 +astropy/config/__init__.py,sha256=mdPkQFFJpl-BhXd34GBXOq_ZakKQgXU8YvoxLeqXDTw,412 +astropy/config/__init__.pyc,, +astropy/config/affiliated.py,sha256=Nh59kjSsK6n7Kx_Kre0Ej-L5NfsUAy38gDBmdIso5bA,309 +astropy/config/affiliated.pyc,, +astropy/config/configuration.py,sha256=6AdR1BT9RsqhgPbrjkJDOstyG22UQRwyH8hOi5nwfmU,25037 +astropy/config/configuration.pyc,, +astropy/config/paths.py,sha256=H8Im4KdJA6gIjSZkip8i4pan1TqRkJvPLWbEPgxDfUI,10744 +astropy/config/paths.pyc,, +astropy/config/setup_package.py,sha256=Yn1F4foyxsZPPKsTazQl1KkJ8NyoPzykvvkSTc5ZQxk,201 +astropy/config/setup_package.pyc,, +astropy/config/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/config/tests/__init__.pyc,, +astropy/config/tests/data/alias.cfg,sha256=d3h1sePCQFWzcf-6WoVUaf1YHw0Qzh0-4muuuPgV1sM,54 +astropy/config/tests/data/astropy.0.3.cfg,sha256=sTapCvYvZx3DGDbh-ZE0Spi94eoUpTH5okohx4jzjjw,6863 +astropy/config/tests/data/astropy.0.3.windows.cfg,sha256=eOpEQxjenwIH3VZeWvImtQWhdgZr2o5To1oAm6JZsN0,7012 +astropy/config/tests/data/deprecated.cfg,sha256=dxQhS8_DMP2gtuvj7vTiN65dWKSkxFD2peCs_8inl24,30 +astropy/config/tests/data/empty.cfg,sha256=_GxA6LBV6vpRoY-h1d5TTNLtQny-wSNEQnzC-1MU32M,422 +astropy/config/tests/data/not_empty.cfg,sha256=LA_5YRziJ5evBWv4K_iS-C5ABSAk4EuFMMoJdQiYWtQ,420 +astropy/config/tests/test_configs.py,sha256=6-hwLdwnHOCnfUMbWOygKhnEmz-27PIiFn7hjQelU3A,10071 +astropy/config/tests/test_configs.pyc,, +astropy/conftest.py,sha256=pF6SXw8l2X2PtLMn9kBAvkDhWn9SavBK_hUCSbXikfg,1112 +astropy/conftest.pyc,, +astropy/constants/__init__.py,sha256=wL3QWOtwIxFMvgDB7c_-iJO7s96jOGbSKlUy980C7FI,1739 +astropy/constants/__init__.pyc,, +astropy/constants/astropyconst13.py,sha256=jKZa_9631St3jvJd4UCDfPPx8uo7PPALrXyhCCIMDF4,644 +astropy/constants/astropyconst13.pyc,, +astropy/constants/astropyconst20.py,sha256=4YwKYZLKHbGYEY6GYLC9AaGHfHJjB0lrcWwihW2qit4,633 +astropy/constants/astropyconst20.pyc,, +astropy/constants/cgs.py,sha256=aigjQfMG3aos-TpWZoCMV8GvcEFL_5mU4oFz3zbYBBY,677 
+astropy/constants/cgs.pyc,, +astropy/constants/codata2010.py,sha256=UV5o-CKdX_YxAf1vZgPgA1lFOW2P7cGpxxI1VhcdRjE,4104 +astropy/constants/codata2010.pyc,, +astropy/constants/codata2014.py,sha256=cCDeD3imLGmQl3hUOs00keRxLexHkwZq5jUezcbST_0,3789 +astropy/constants/codata2014.pyc,, +astropy/constants/constant.py,sha256=s-hHfeZlzKjIQSMxwmtk4GAUJ6gGhnSljVEoiDPhz_A,8796 +astropy/constants/constant.pyc,, +astropy/constants/iau2012.py,sha256=OJ3wgy-CHCoROXBtAMqobXbiH1eg2xD0pvDANPwIdLQ,2535 +astropy/constants/iau2012.pyc,, +astropy/constants/iau2015.py,sha256=9X5Pll_y7ELW0Y-xSO-k6fwRAlPznG7aL02xX2nlgCY,3462 +astropy/constants/iau2015.pyc,, +astropy/constants/setup_package.py,sha256=82VuCVSU5qR1xNbgTtuQ-m9p-q3dpRAP499jMIfi3Hs,104 +astropy/constants/setup_package.pyc,, +astropy/constants/si.py,sha256=KgWe26CX_HhSdAYWJ7S5tacNH8o20HWIWL5IwOOjKsM,659 +astropy/constants/si.pyc,, +astropy/constants/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/constants/tests/__init__.pyc,, +astropy/constants/tests/test_constant.py,sha256=JZujuJExIBpxSLz4HrOVWig5Y-HImvp3rXlSmnvPrJA,4601 +astropy/constants/tests/test_constant.pyc,, +astropy/constants/tests/test_pickle.py,sha256=8_G-PSYqv_ze08KIPEuXB1v__d8TXWyRzchUTD9iUVI,757 +astropy/constants/tests/test_pickle.pyc,, +astropy/constants/tests/test_prior_version.py,sha256=oofRuTMSb-jW_8cvaiDCSgFNWEQc3BbG3SJRsSk2ifY,4326 +astropy/constants/tests/test_prior_version.pyc,, +astropy/convolution/__init__.py,sha256=rAwtZ8XPUv6oZHr7io5t_nrWbwa972gnjNOEBZO1uqU,459 +astropy/convolution/__init__.pyc,, +astropy/convolution/boundary_extend.so,sha256=A69ujnxTZiAzk7YiPN1xgOtEc6_wf9I2-XFcNibnMD4,291200 +astropy/convolution/boundary_fill.so,sha256=gBgxP3obCVZUyWz4WwG1A96hDaolnyifsSV6Q9bEotM,300688 +astropy/convolution/boundary_none.so,sha256=TDlzQdHS1dFjNPjOp-IBYzDT0Pf3-wpItjiIt5sqy3M,303824 +astropy/convolution/boundary_wrap.so,sha256=5MV_Ao8D_T_KsuClZKTfakhwzzTef4se2kCYwWs3luA,314304 +astropy/convolution/convolve.py,sha256=E7E0TfnRVIb7aY33hjMxopAGFwZUZ6Kn5NMQzZD6ZvM,36130 +astropy/convolution/convolve.pyc,, +astropy/convolution/core.py,sha256=_Bfh6GQc0W03RWyC0diO2dEGvcsvRDVoI5L4IlLCSoM,11751 +astropy/convolution/core.pyc,, +astropy/convolution/kernels.py,sha256=_mzE_lwXfbKFgAzGtnnjLmji5BhfYWa2GfHEIuJ4_1I,32465 +astropy/convolution/kernels.pyc,, +astropy/convolution/setup_package.py,sha256=82VuCVSU5qR1xNbgTtuQ-m9p-q3dpRAP499jMIfi3Hs,104 +astropy/convolution/setup_package.pyc,, +astropy/convolution/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/convolution/tests/__init__.pyc,, +astropy/convolution/tests/test_convolve.py,sha256=pbToWty-E0-1VhzodEj2EfVRnrhDGdEYWvWca-UgsIk,33807 +astropy/convolution/tests/test_convolve.pyc,, +astropy/convolution/tests/test_convolve_fft.py,sha256=5eTU6a8kVyjiT1k_EnFMD956-IDnSTZcUA8f-IKcPic,23795 +astropy/convolution/tests/test_convolve_fft.pyc,, +astropy/convolution/tests/test_convolve_kernels.py,sha256=Wjqvktsp3rs0N-9O6ZxS46nx2VbztBTOzbhhM4l675k,4184 +astropy/convolution/tests/test_convolve_kernels.pyc,, +astropy/convolution/tests/test_convolve_models.py,sha256=a7si9TAr8mwKi3GKAqgENAHOBm_iwX47l_d6rrAXdi0,4036 +astropy/convolution/tests/test_convolve_models.pyc,, +astropy/convolution/tests/test_convolve_nddata.py,sha256=XhqnTT114GpgqGQ_NrY4Ws4k3pvBtLBINlqyfQfUzEc,1829 +astropy/convolution/tests/test_convolve_nddata.pyc,, +astropy/convolution/tests/test_convolve_speeds.py,sha256=DC9kVvMdHoLSFSCJpKZgwNX-B27JgBUiCmXRh81wAsM,12282 +astropy/convolution/tests/test_convolve_speeds.pyc,, 
+astropy/convolution/tests/test_discretize.py,sha256=K0ondnH2wxMJbwNKVFqv3a3OKu9SL6LvW21o0mpJLng,6198 +astropy/convolution/tests/test_discretize.pyc,, +astropy/convolution/tests/test_kernel_class.py,sha256=BpYl1MrRmx853YxnH1K7b-kqgYfumI2FSpMhoqp4-mw,18917 +astropy/convolution/tests/test_kernel_class.pyc,, +astropy/convolution/tests/test_pickle.py,sha256=hsALOsOA4RcDgpvqBSnixXAyFqP9puAkFjXXeyOHRVE,1046 +astropy/convolution/tests/test_pickle.pyc,, +astropy/convolution/utils.py,sha256=kixpU6j3upbSrDAIRGWTa3rWyCfMKiq5mk3lWkZ__aE,10609 +astropy/convolution/utils.pyc,, +astropy/coordinates/__init__.py,sha256=K26dAbDRPSWAj0X6PIQZWN0Rz49IvDynTUitjIg8w9A,1475 +astropy/coordinates/__init__.pyc,, +astropy/coordinates/angle_lextab.py,sha256=nHBw7SCnek7C00dFs8QFU0eBhM3Ko75qY9rQM_C1nHM,3441 +astropy/coordinates/angle_lextab.pyc,, +astropy/coordinates/angle_parsetab.py,sha256=UZJo32hwmlmOd4F7kgYFX3taIj4sXFwW7_pPaMJDWyo,4813 +astropy/coordinates/angle_parsetab.pyc,, +astropy/coordinates/angle_utilities.py,sha256=k4xgFd9jlFo1QRXPDu1wE1vpQtGH29j-7dTPY6E4bIc,20435 +astropy/coordinates/angle_utilities.pyc,, +astropy/coordinates/angles.py,sha256=X5y-1YrgagyQVSxLL0ekdIAp5e22GyoFE8KOym6SJdM,25700 +astropy/coordinates/angles.pyc,, +astropy/coordinates/attributes.py,sha256=oeVgG-OpM2s9K-z-hnxHCm-I_wx5S8AEBfwxA7ZMXKE,19016 +astropy/coordinates/attributes.pyc,, +astropy/coordinates/baseframe.py,sha256=B1vASgxoPTQx9ajulODGpfuS0EjyHnUN16YOSHCzzl0,59163 +astropy/coordinates/baseframe.pyc,, +astropy/coordinates/builtin_frames/__init__.py,sha256=XGhW9FHIsFSOceG03Py29mYdfYRWgINGggNaEyer-B4,5052 +astropy/coordinates/builtin_frames/__init__.pyc,, +astropy/coordinates/builtin_frames/altaz.py,sha256=mJY-u1n6OUHoMiLYrgiJfUofD-qeCh1HIbXhsGmD2X4,6991 +astropy/coordinates/builtin_frames/altaz.pyc,, +astropy/coordinates/builtin_frames/baseradec.py,sha256=nUSovAwaiMSOZhSGZ3Dol5SKA97XD8VrNQPbHPlqjHI,4080 +astropy/coordinates/builtin_frames/baseradec.pyc,, +astropy/coordinates/builtin_frames/cirs.py,sha256=k1cysgNrKFNZqjmDrLsHTq5koApbCDTlbUcmGaTCBL8,1008 +astropy/coordinates/builtin_frames/cirs.pyc,, +astropy/coordinates/builtin_frames/cirs_observed_transforms.py,sha256=gwo3BZYW1EzRD0xX9xSpibkJ2yWblDoGtRXfHV15P0U,6336 +astropy/coordinates/builtin_frames/cirs_observed_transforms.pyc,, +astropy/coordinates/builtin_frames/ecliptic.py,sha256=NWlxZM8tdof_sgXBxAR_izM-n8wQq5Nn1l44YPBIY04,7209 +astropy/coordinates/builtin_frames/ecliptic.pyc,, +astropy/coordinates/builtin_frames/ecliptic_transforms.py,sha256=kemve2_7qlJ1ctIE10PfoCAsneZoMxf05FkyzhiD7PI,5107 +astropy/coordinates/builtin_frames/ecliptic_transforms.pyc,, +astropy/coordinates/builtin_frames/fk4.py,sha256=fDPH_enUg2UguO6kwKeYlXVSrdyusrl3uakc_oZ0TTE,7491 +astropy/coordinates/builtin_frames/fk4.pyc,, +astropy/coordinates/builtin_frames/fk4_fk5_transforms.py,sha256=EsHzkNE13AFQbvB7sAaJ6lp5ogdFeI7lVISabvp5HHQ,2755 +astropy/coordinates/builtin_frames/fk4_fk5_transforms.pyc,, +astropy/coordinates/builtin_frames/fk5.py,sha256=y5CQXvHNP1d7D0OsRRlsMkFhimgdVxEZ1So6fB8W2-4,1978 +astropy/coordinates/builtin_frames/fk5.pyc,, +astropy/coordinates/builtin_frames/galactic.py,sha256=LvNjz9OZ8rliOcu5HHjVgvpeorh9uHgr12SXmmyD33g,5646 +astropy/coordinates/builtin_frames/galactic.pyc,, +astropy/coordinates/builtin_frames/galactic_transforms.py,sha256=a23aFNUDErZS1D9PJHOBnbBcZpO25PJOBQ1jIadFjaM,1908 +astropy/coordinates/builtin_frames/galactic_transforms.pyc,, +astropy/coordinates/builtin_frames/galactocentric.py,sha256=hIob5buwgNVVT89PgAUGKiRRwM4VIn7--2ZkErgSPkE,13947 
+astropy/coordinates/builtin_frames/galactocentric.pyc,, +astropy/coordinates/builtin_frames/gcrs.py,sha256=RuZmiFXsFNYkJvzxlXFufdTAapp-roM7-OkuoFxqbNU,4857 +astropy/coordinates/builtin_frames/gcrs.pyc,, +astropy/coordinates/builtin_frames/hcrs.py,sha256=H13yYgMU6G_ht0ZvtrfriTY-yko4sc-XOcNIeqjCcpg,1526 +astropy/coordinates/builtin_frames/hcrs.pyc,, +astropy/coordinates/builtin_frames/icrs.py,sha256=b9f9nCNSBCvl6mfEsz-j3Iss44oFBxNIziBWIBxtRuE,926 +astropy/coordinates/builtin_frames/icrs.pyc,, +astropy/coordinates/builtin_frames/icrs_cirs_transforms.py,sha256=kaaz5BPE_Lzj9dLydN-gqU5w8WRW6hC9Sos6VbbbCzE,15794 +astropy/coordinates/builtin_frames/icrs_cirs_transforms.pyc,, +astropy/coordinates/builtin_frames/icrs_fk5_transforms.py,sha256=xzbvpPD09lf95mWhsApl7c_J3qi3NEj7hxD5uoUtbis,1605 +astropy/coordinates/builtin_frames/icrs_fk5_transforms.pyc,, +astropy/coordinates/builtin_frames/intermediate_rotation_transforms.py,sha256=nopT9TAHPMl7ryv9DET7MhIMpTcBJS1ggb6JKd7M4-0,5070 +astropy/coordinates/builtin_frames/intermediate_rotation_transforms.pyc,, +astropy/coordinates/builtin_frames/itrs.py,sha256=39B51lBJ3YRe2cUj2Cq9HZMBuRpAKI0gpPrQoqKJHLw,1382 +astropy/coordinates/builtin_frames/itrs.pyc,, +astropy/coordinates/builtin_frames/lsr.py,sha256=asaw2jSquZ8jXH3RfMqsL3X5Q6Ts7b2z4CunPlBTabQ,9157 +astropy/coordinates/builtin_frames/lsr.pyc,, +astropy/coordinates/builtin_frames/skyoffset.py,sha256=-Ze9yYAZHfT9LVlR5-cqJC-3kgh_sZsoh1q4w1UAWvw,10889 +astropy/coordinates/builtin_frames/skyoffset.pyc,, +astropy/coordinates/builtin_frames/supergalactic.py,sha256=AazxVuRMbsXZvwPe8_E2XcXK1AQHKnSSctZcbiTW8Js,4206 +astropy/coordinates/builtin_frames/supergalactic.pyc,, +astropy/coordinates/builtin_frames/supergalactic_transforms.py,sha256=cr-rVE0n2fsp8NOKQwWUlVqXHpheutweCa1ce5wzdRA,960 +astropy/coordinates/builtin_frames/supergalactic_transforms.pyc,, +astropy/coordinates/builtin_frames/utils.py,sha256=dCnqSaeGpjUjsHqtEqHqnqXPjWbvOFPLw1dVoITk6ys,10031 +astropy/coordinates/builtin_frames/utils.pyc,, +astropy/coordinates/calculation.py,sha256=z7j_dRXOfKSAQH2D8COU3HTXSvFIClXamJYCHmm5pZg,5301 +astropy/coordinates/calculation.pyc,, +astropy/coordinates/data/constellation_data_roman87.dat,sha256=PmRuPaf93DSuxj3igotZiQ8E9jLcOCBCJZYRzQ9jlBw,10799 +astropy/coordinates/data/constellation_names.dat,sha256=wRuXg8Jg-fkZ-w4uDpfBA_u5nnwHSXObgGUd5IkU-Yk,1229 +astropy/coordinates/data/sites.json,sha256=DhOM8whW4beIxxLPOG4C816rT-dtChFKzgKHWUL1M9A,425 +astropy/coordinates/distances.py,sha256=U0KXuKMylTGI9v9YAz55FOzHGK9dkt5o5I1IShNh-_Q,6913 +astropy/coordinates/distances.pyc,, +astropy/coordinates/earth.py,sha256=papNv3ukQ9WKi_n2z89iL-rGO1gEmDhXYBcxIUI7YpY,29599 +astropy/coordinates/earth.pyc,, +astropy/coordinates/earth_orientation.py,sha256=PHJopbt5RrEWorI9BoLsANYPc4SYregM0dTw7wMXczU,14063 +astropy/coordinates/earth_orientation.pyc,, +astropy/coordinates/errors.py,sha256=tp0ChX287v7-QkhjLHJFANY79whzr0mdooket-SZLwE,4696 +astropy/coordinates/errors.pyc,, +astropy/coordinates/funcs.py,sha256=Tn-ibEXAIBbBxFnXAi2IHJcMPOmZkgBlm6yPs0AfeuY,9916 +astropy/coordinates/funcs.pyc,, +astropy/coordinates/matching.py,sha256=ECpP2et7eHXFfVj4dB97Nh9J2Kw1LQQTAR9SebNPSek,19988 +astropy/coordinates/matching.pyc,, +astropy/coordinates/matrix_utilities.py,sha256=Sdln8SIwVR3exTLsP3y6TnosuSS0_T-G6pq8NuPpud8,4201 +astropy/coordinates/matrix_utilities.pyc,, +astropy/coordinates/name_resolve.py,sha256=HH-_l1uzFNltU-UAU0vV69z0bLYbo1VC6QLhIePNsX8,5732 +astropy/coordinates/name_resolve.pyc,, 
+astropy/coordinates/orbital_elements.py,sha256=jwqXQPzi0sOLB5ZXYMK5cnKE1NSK5Ro3QfVKMOCpw_U,7619 +astropy/coordinates/orbital_elements.pyc,, +astropy/coordinates/representation.py,sha256=AdpDupdl1iS226pVidjxGzGCNpUKbTR7EgvMH57jvSw,113128 +astropy/coordinates/representation.pyc,, +astropy/coordinates/setup_package.py,sha256=flWIMnr5VmZFnYyBgNSxGpMEZbFRbGBQDpmp6GQH2qg,261 +astropy/coordinates/setup_package.pyc,, +astropy/coordinates/sites.py,sha256=OfByJsxgxaeQjJlcLA2DtFyfVG4qE1tpgVc-1i6qVRQ,4954 +astropy/coordinates/sites.pyc,, +astropy/coordinates/sky_coordinate.py,sha256=hVNUm4TwdfYbPTnNQTF79ygUHqpmRGVcm7R2bIEGjrk,81737 +astropy/coordinates/sky_coordinate.pyc,, +astropy/coordinates/solar_system.py,sha256=Fhwr8n6HuzK4G6jNl9nFQC4f5IcnNke8mt-tNaT2UIE,19082 +astropy/coordinates/solar_system.pyc,, +astropy/coordinates/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/coordinates/tests/__init__.pyc,, +astropy/coordinates/tests/accuracy/__init__.py,sha256=gjottWXA3u6Pjh7PGegQX4qQ4Vsgt5z9Qe-L4-XtUaI,433 +astropy/coordinates/tests/accuracy/__init__.pyc,, +astropy/coordinates/tests/accuracy/fk4_no_e_fk4.csv,sha256=r81QZ_IN5dfLqYRkQ8uFvjpmFotdQviad-LAVMOTl-g,18969 +astropy/coordinates/tests/accuracy/fk4_no_e_fk5.csv,sha256=ACTkFRdOch9uP73pd6mZ0mqJuXUE8r1otWEkS25mOaA,22596 +astropy/coordinates/tests/accuracy/galactic_fk4.csv,sha256=rAjS7n0TPg15Nl7AMR603-um5NEv7Yqh1ReaGHhHkoc,20802 +astropy/coordinates/tests/accuracy/generate_ref_ast.py,sha256=X80cWX3eGL50A2Lucki77QQZfhm4kt8CphyuRKEJE-A,9847 +astropy/coordinates/tests/accuracy/generate_ref_ast.pyc,, +astropy/coordinates/tests/accuracy/icrs_fk5.csv,sha256=OamfG9ZtrgHmtuH61BhkGswePk6M_6NeyXIjs7hGw4M,20793 +astropy/coordinates/tests/accuracy/test_altaz_icrs.py,sha256=DZPU4KoofF_HNOLBfZCaJvSLwEhhXYU7EAsERzQmp5w,8452 +astropy/coordinates/tests/accuracy/test_altaz_icrs.pyc,, +astropy/coordinates/tests/accuracy/test_ecliptic.py,sha256=kzdSQF7nRsy7QbfUl-JJgyet2T1gvNRq6irnLxIoWNw,4018 +astropy/coordinates/tests/accuracy/test_ecliptic.pyc,, +astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.py,sha256=nI9aLAczVAzcMTf9B8WPDPf4Qj570okVy2d0xJDLTyc,2191 +astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.pyc,, +astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.py,sha256=cPOvIrU4J3cHhiTpCdvwfRx0J7GW6uwcIUTFTFgWmjY,2320 +astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.pyc,, +astropy/coordinates/tests/accuracy/test_galactic_fk4.py,sha256=-QaPJ96okg5rsYnaNAaZ4mAnx_pRxVz7qSf4U_-HsB8,2072 +astropy/coordinates/tests/accuracy/test_galactic_fk4.pyc,, +astropy/coordinates/tests/accuracy/test_icrs_fk5.py,sha256=7TUX_h96Nj1GFvQqzmej65hLuu17lVb_E4I0z_x7Oe8,1992 +astropy/coordinates/tests/accuracy/test_icrs_fk5.pyc,, +astropy/coordinates/tests/test_angles.py,sha256=zaNXnBUQ7XvwRpCqGMKq0JBQTn_NCDMip3XOKNaA6_0,29049 +astropy/coordinates/tests/test_angles.pyc,, +astropy/coordinates/tests/test_angular_separation.py,sha256=nD-2FoOhnrW3lq5xkpG_BT7so1CG-0ILThtnFh_lRDM,3260 +astropy/coordinates/tests/test_angular_separation.pyc,, +astropy/coordinates/tests/test_api_ape5.py,sha256=PcC5QgeKbQmDYjRCKKobiojck_KsXfLxL2uK4PH2K0Q,21144 +astropy/coordinates/tests/test_api_ape5.pyc,, +astropy/coordinates/tests/test_arrays.py,sha256=SSOJNUPovI-0T77PHWMRDl17ZKaCJbxDDyIzcVlQJrc,8604 +astropy/coordinates/tests/test_arrays.pyc,, +astropy/coordinates/tests/test_atc_replacements.py,sha256=4K5z3QAT1MK-X_0w2VgS9tcT19OoJVQ5ipAp4wFwBcE,1190 +astropy/coordinates/tests/test_atc_replacements.pyc,, 
+astropy/coordinates/tests/test_celestial_transformations.py,sha256=ibnmHd7afxq2rwmFpTYoblScWzHtP93_s72OFYrFQ5A,11663 +astropy/coordinates/tests/test_celestial_transformations.pyc,, +astropy/coordinates/tests/test_distance.py,sha256=sAyUGOV3FKpjvt8ms5qdwTSY5F85T8QD9ydvQv7Gs-M,7898 +astropy/coordinates/tests/test_distance.pyc,, +astropy/coordinates/tests/test_earth.py,sha256=0BBZvz-Yu5ujVHPKnPNg2iqiBWr3BATQltzw_27654o,13673 +astropy/coordinates/tests/test_earth.pyc,, +astropy/coordinates/tests/test_finite_difference_velocities.py,sha256=ZrtGRFk8KaJR7i7ETNIiNNTpcwiGdxy34KHzOJH6Pnw,9569 +astropy/coordinates/tests/test_finite_difference_velocities.pyc,, +astropy/coordinates/tests/test_formatting.py,sha256=udS0eHdRnK3S2QIYPxISVXp2TI3P3b30LXlxFTJCQ4w,4842 +astropy/coordinates/tests/test_formatting.pyc,, +astropy/coordinates/tests/test_frames.py,sha256=ENDFys_kze4tkVIE7eg9PDKD0WeYdaABAu8baYUbHBw,30098 +astropy/coordinates/tests/test_frames.pyc,, +astropy/coordinates/tests/test_frames_with_velocity.py,sha256=15eD8c4ojhhNA6tzHxI4g616TK__-S39w0SpkStyUO0,10119 +astropy/coordinates/tests/test_frames_with_velocity.pyc,, +astropy/coordinates/tests/test_funcs.py,sha256=E2qeG2E7jKf682IfFcMxmDMHY6HAdhSaIueVR5f0Oa0,2515 +astropy/coordinates/tests/test_funcs.pyc,, +astropy/coordinates/tests/test_iau_fullstack.py,sha256=pZ2hjIwxGEqEY-dCy9uzlENV-ct3vR11jOnPusKE1ns,7659 +astropy/coordinates/tests/test_iau_fullstack.pyc,, +astropy/coordinates/tests/test_intermediate_transformations.py,sha256=BWhw-ILiBnHavMM4qRzvou0xPYmQMw6T8uRpRHBeftM,20488 +astropy/coordinates/tests/test_intermediate_transformations.pyc,, +astropy/coordinates/tests/test_matching.py,sha256=49uxLseAmpUMMl6AiGkZkt7P30pCseiYFlPa_lkWSPg,11099 +astropy/coordinates/tests/test_matching.pyc,, +astropy/coordinates/tests/test_matrix_utilities.py,sha256=TIepL93AStEvVhCXH2O-NSpzSwww8CaIeI3oT_OjmWU,1741 +astropy/coordinates/tests/test_matrix_utilities.pyc,, +astropy/coordinates/tests/test_name_resolve.py,sha256=3TEGwuHFxSxtyn0OXJ9syzo6r7n5fd6KEBAfA10NpyY,4778 +astropy/coordinates/tests/test_name_resolve.pyc,, +astropy/coordinates/tests/test_pickle.py,sha256=PLtH0sCddovoaYOkz9Nnb_fNo1YvMw7s1U9KyHe66MM,1861 +astropy/coordinates/tests/test_pickle.pyc,, +astropy/coordinates/tests/test_regression.py,sha256=qw2_7lwa7w2f3UFB56Q7HKft5nExIm-XscqOrqWnTpk,22378 +astropy/coordinates/tests/test_regression.pyc,, +astropy/coordinates/tests/test_representation.py,sha256=bszeNvdWuGbroFS_aV9sqAueTY6-uiC_aByNkjVQLX0,50622 +astropy/coordinates/tests/test_representation.pyc,, +astropy/coordinates/tests/test_representation_arithmetic.py,sha256=tH2F0WpelfdjiPhB4MagVXHP2DeBT0jXdP5Id9LzaQE,56077 +astropy/coordinates/tests/test_representation_arithmetic.pyc,, +astropy/coordinates/tests/test_representation_methods.py,sha256=6iypmUndQlLImVni9Zdy5bt11jmUfrtUmUHn_7ztTCM,12413 +astropy/coordinates/tests/test_representation_methods.pyc,, +astropy/coordinates/tests/test_shape_manipulation.py,sha256=NTOmMyqCLUhWQ_4ad5odNvjKHKkSMzSijPtu4w7tMS8,14680 +astropy/coordinates/tests/test_shape_manipulation.pyc,, +astropy/coordinates/tests/test_sites.py,sha256=f25jZ3NkP3X94iOgN3-mWBMkz8ITrstrY_IypoCpUgw,6099 +astropy/coordinates/tests/test_sites.pyc,, +astropy/coordinates/tests/test_sky_coord.py,sha256=cjw1EvpwDDvwer5Xlan_IHYaSh0Y-ZqgV_Yi62bHpWo,52285 +astropy/coordinates/tests/test_sky_coord.pyc,, +astropy/coordinates/tests/test_skyoffset_transformations.py,sha256=Ct6HKcvf8l9fHLj2pdyTpESyUlXZMS38JKfrj7M7cek,12857 +astropy/coordinates/tests/test_skyoffset_transformations.pyc,, 
+astropy/coordinates/tests/test_solar_system.py,sha256=bVGCOvv2_xduPj49EElj5axVkEcDCd9ti9fCLMu6YOU,15063 +astropy/coordinates/tests/test_solar_system.pyc,, +astropy/coordinates/tests/test_transformations.py,sha256=J_28voDHBO0Uf985B_UV7fK9Pszq8qqhtaCT5xzQkQo,13879 +astropy/coordinates/tests/test_transformations.pyc,, +astropy/coordinates/tests/test_unit_representation.py,sha256=8Ttdz0MBXrpjIkJADYPVpm2IeqAOEmGsQA3sv3S_wMo,3314 +astropy/coordinates/tests/test_unit_representation.pyc,, +astropy/coordinates/tests/test_velocity_corrs.py,sha256=6Q6BZ6rF5i_JDjM2q1EF7iblexEAh6X5k-ayoVFkvUw,16140 +astropy/coordinates/tests/test_velocity_corrs.pyc,, +astropy/coordinates/tests/utils.py,sha256=MhlMrzKuByLz14_i52NdMlXEe1uqshW8UTujzeX1o4c,904 +astropy/coordinates/tests/utils.pyc,, +astropy/coordinates/transformations.py,sha256=6htw-DYgs6FLulIK-ShmCQoulxvH5uryelzkrvYNVLY,53754 +astropy/coordinates/transformations.pyc,, +astropy/cosmology/__init__.py,sha256=oXzQcJ-3-Ip95tTSJgIJwHZ8MZItZHfnFvtacD0rtic,489 +astropy/cosmology/__init__.pyc,, +astropy/cosmology/core.py,sha256=YCJRrJxZjDCCcn2pykjrgyyYfucbqAZarIWtNG8V_E4,99302 +astropy/cosmology/core.pyc,, +astropy/cosmology/funcs.py,sha256=thF3Jmf3-RWhf9Wd0q5YmEN0aUq5ILvT0h6GqFCrFzs,5499 +astropy/cosmology/funcs.pyc,, +astropy/cosmology/parameters.py,sha256=KwErAZ6SRZxCjFCjB6l7xgBbFTi2mDH2WjsOjmV7qHE,4257 +astropy/cosmology/parameters.pyc,, +astropy/cosmology/scalar_inv_efuncs.so,sha256=se1YGwO8dX3QcoUwavxDqyKz_u0xu3plXJllz-pDEOo,458304 +astropy/cosmology/setup_package.py,sha256=82VuCVSU5qR1xNbgTtuQ-m9p-q3dpRAP499jMIfi3Hs,104 +astropy/cosmology/setup_package.pyc,, +astropy/cosmology/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/cosmology/tests/__init__.pyc,, +astropy/cosmology/tests/test_cosmology.py,sha256=IOzeIrJVlSiNYHGoK34CIwnKOnVjKL40dOMosUMoQxI,70548 +astropy/cosmology/tests/test_cosmology.pyc,, +astropy/cosmology/tests/test_pickle.py,sha256=GXymYFsA09kfD4j1_47K3mqLy6xgCayrmhzMXRkaM-w,584 +astropy/cosmology/tests/test_pickle.pyc,, +astropy/cython_version.py,sha256=dJDSN_Abk8wjFtKv8kTQlfynUhICsq6mKQ5yPlM-Zp8,58 +astropy/cython_version.pyc,, +astropy/extern/__init__.py,sha256=FEc8xZMs4AvIUQhHhDq6dm_MmRLOWdgD8Oq-ZpK0oOk,457 +astropy/extern/__init__.pyc,, +astropy/extern/bundled/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/extern/bundled/__init__.pyc,, +astropy/extern/bundled/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +astropy/extern/bundled/six.pyc,, +astropy/extern/configobj/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/extern/configobj/__init__.pyc,, +astropy/extern/configobj/configobj.py,sha256=BcxIVcSl0qCNjbJM4NZu0_iAA-G-luTQjbUXtyL54QM,88263 +astropy/extern/configobj/configobj.pyc,, +astropy/extern/configobj/validate.py,sha256=CChPj6IPH-MbWNAx3maSBZvnYXehs-JXRTgbHenOwQ0,46708 +astropy/extern/configobj/validate.pyc,, +astropy/extern/css/jquery.dataTables.css,sha256=2wizTcUFbgPcr14dXfEXtY-1JPRLMYRabytFJ50j5uw,15423 +astropy/extern/js/jquery-3.1.1.js,sha256=16cdPddA6VdVInumRGo6IbivbERE8p7CQR3HzTBuELA,267194 +astropy/extern/js/jquery-3.1.1.min.js,sha256=hVVnYaiADRTO2PzUGmuLJr8BLUSjGIZsDYGmIJLv2b8,86709 +astropy/extern/js/jquery.dataTables.js,sha256=lk-4-kSE2NTrFM0hFjmtpZ9BVKvZ1LKBvpK2ytmBm_0,447282 +astropy/extern/js/jquery.dataTables.min.js,sha256=TX6POJQ2u5_aJmHTJ_XUL5vWCbuOw0AQdgUEzk4vYMc,82638 +astropy/extern/plugins/__init__.py,sha256=FEc8xZMs4AvIUQhHhDq6dm_MmRLOWdgD8Oq-ZpK0oOk,457 +astropy/extern/plugins/__init__.pyc,, 
+astropy/extern/plugins/pytest_doctestplus/__init__.py,sha256=9NlnHGMB-8JoAClAVBmLSE39oo-lQlVeEhc59vGs7Wc,150 +astropy/extern/plugins/pytest_doctestplus/__init__.pyc,, +astropy/extern/plugins/pytest_doctestplus/output_checker.py,sha256=NVoipk_EGpLIPSmEtJ8z9LicvQaqctQIFC7V50nXQgU,7728 +astropy/extern/plugins/pytest_doctestplus/output_checker.pyc,, +astropy/extern/plugins/pytest_doctestplus/plugin.py,sha256=KQl2JUVZQCh-R5fo99-3VqYzyfADk4Oo_agSyj-nBdA,15232 +astropy/extern/plugins/pytest_doctestplus/plugin.pyc,, +astropy/extern/plugins/pytest_openfiles/__init__.py,sha256=9NlnHGMB-8JoAClAVBmLSE39oo-lQlVeEhc59vGs7Wc,150 +astropy/extern/plugins/pytest_openfiles/__init__.pyc,, +astropy/extern/plugins/pytest_openfiles/plugin.py,sha256=BtmapYDVDOcVwkazPZKBmaT9SzgMBIQKfMqFK4UMHQQ,3238 +astropy/extern/plugins/pytest_openfiles/plugin.pyc,, +astropy/extern/plugins/pytest_remotedata/__init__.py,sha256=9NlnHGMB-8JoAClAVBmLSE39oo-lQlVeEhc59vGs7Wc,150 +astropy/extern/plugins/pytest_remotedata/__init__.pyc,, +astropy/extern/plugins/pytest_remotedata/disable_internet.py,sha256=At_Z0KUr5aUOHQWNC-IC3DXx7o4QoRl2y-QJg5xF7SU,6271 +astropy/extern/plugins/pytest_remotedata/disable_internet.pyc,, +astropy/extern/plugins/pytest_remotedata/plugin.py,sha256=ebyJYgaRQcnKIPKdy12FiYXbmhdZSj8BAMiJhkIza14,3548 +astropy/extern/plugins/pytest_remotedata/plugin.pyc,, +astropy/extern/ply/__init__.py,sha256=q4s86QwRsYRa20L9ueSxfh-hPihpftBjDOvYa2_SS2Y,102 +astropy/extern/ply/__init__.pyc,, +astropy/extern/ply/cpp.py,sha256=rVpp9C88FqD4qCU8jyqKs9B2CUsWOikjHjv47lrt_S8,33315 +astropy/extern/ply/cpp.pyc,, +astropy/extern/ply/ctokens.py,sha256=MKksnN40TehPhgVfxCJhjj_BjL943apreABKYz-bl0Y,3177 +astropy/extern/ply/ctokens.pyc,, +astropy/extern/ply/lex.py,sha256=Ve5Rgh6tw7liBCeFydUXNS82FNWsnHBRZO5qWdkzLV8,42955 +astropy/extern/ply/lex.pyc,, +astropy/extern/ply/yacc.py,sha256=oyMiGv1pkkzaOa8s2YxlTwkbCu4S5goDJ1zhipjaNnk,137265 +astropy/extern/ply/yacc.pyc,, +astropy/extern/setup_package.py,sha256=F_PoVx3Ueb9KJW9G95ARVKKJpe8JLoRINYL1WkkAeIc,288 +astropy/extern/setup_package.pyc,, +astropy/extern/six.py,sha256=-IpTcYIluF2nQsrAshFfxlzyoszqCB2GtUSDGcSQAqQ,1705 +astropy/extern/six.pyc,, +astropy/io/__init__.py,sha256=qWiybQoOBpkfdNNZJVrwrDVxXOLSoVLOTL4N5f1MDFw,180 +astropy/io/__init__.pyc,, +astropy/io/ascii/__init__.py,sha256=m7aoOlZatoWKczsN97PR0pNunyMP4a3PIYyPrbbhTqM,1630 +astropy/io/ascii/__init__.pyc,, +astropy/io/ascii/basic.py,sha256=wE75liQJNPsfOGePmGD6yYFauxdSgrvoV_MP0XhFzrg,11087 +astropy/io/ascii/basic.pyc,, +astropy/io/ascii/cds.py,sha256=XkeMVDesRPY2ZHWGycTMLYFUPR02xNPZGDHcV98Csek,13475 +astropy/io/ascii/cds.pyc,, +astropy/io/ascii/connect.py,sha256=WZWTCyJuZngzzrnJmrfS8l4NCneiDybxX5XHqZcRAWI,2660 +astropy/io/ascii/connect.pyc,, +astropy/io/ascii/core.py,sha256=i1QTyY458h1Xmo2jGWOJsYj9ULNYmmIyatJ0zrp1VLQ,57267 +astropy/io/ascii/core.pyc,, +astropy/io/ascii/cparser.so,sha256=FM5pteaKLrNRGno7SAoeUziIua-V5senVE13KZGLW3g,1476384 +astropy/io/ascii/daophot.py,sha256=kMWiSekt9VxM_muq3f9Y4aTH5yttGmZf1Xfa27b95aA,14945 +astropy/io/ascii/daophot.pyc,, +astropy/io/ascii/ecsv.py,sha256=qhtutF0x82ICb7JW2MFKV5y9zP7nATnQWQNelAdgqqM,9460 +astropy/io/ascii/ecsv.pyc,, +astropy/io/ascii/fastbasic.py,sha256=OeVOgGCGKRucCWxVikfFaCfvafPg8Jn6M_sQIif4xF4,14162 +astropy/io/ascii/fastbasic.pyc,, +astropy/io/ascii/fixedwidth.py,sha256=BjlUjMzOWlcQx566NFZ2Ibr1sPeFqsJKar94KYUdQow,15486 +astropy/io/ascii/fixedwidth.pyc,, +astropy/io/ascii/html.py,sha256=Bw5okx9XbW1wIskcCA_auCDvqPqDecGJ5NwVC3IAR7M,17256 +astropy/io/ascii/html.pyc,, 
+astropy/io/ascii/ipac.py,sha256=5JXVmIRQl2FmCNlaYhwvVcsoJvrA81tsTYWyF7lblfo,20637 +astropy/io/ascii/ipac.pyc,, +astropy/io/ascii/latex.py,sha256=yYCY2nJm6GKzbyGFWiiLSPpcMxHBpwOcF7nAiGXB6CM,16260 +astropy/io/ascii/latex.pyc,, +astropy/io/ascii/misc.py,sha256=Yb8U7Fj0eyzNtVz2ARTmBYhQpLLbkoirxGJ8vhlDuto,4232 +astropy/io/ascii/misc.pyc,, +astropy/io/ascii/rst.py,sha256=S82ROb5X4rgcEa2ibeV84mpljOoSbuW3m24Qkh5oSVM,1827 +astropy/io/ascii/rst.pyc,, +astropy/io/ascii/setup_package.py,sha256=Ho1luiHr-tEDedovK2-4Y5zJzWgAiscgN29MDQ5cxvQ,4369 +astropy/io/ascii/setup_package.pyc,, +astropy/io/ascii/sextractor.py,sha256=P3dIWnPIVz-6XyvNOE9NUUtK4pmBvYKrE_vHM-Mh-OM,6355 +astropy/io/ascii/sextractor.pyc,, +astropy/io/ascii/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/io/ascii/tests/__init__.pyc,, +astropy/io/ascii/tests/common.py,sha256=8YcilJRKpaQhZDp-yAPmW50b55JhcI3dXC1VE1otJ7A,2803 +astropy/io/ascii/tests/common.pyc,, +astropy/io/ascii/tests/t/apostrophe.rdb,sha256=rs9u8uv6jIkg0rHfB-dLszXTLf3JBGikepZViq4tqGE,92 +astropy/io/ascii/tests/t/apostrophe.tab,sha256=VFjQLMZoD4tpqxDw7pPMxW6IevMWLXFjQhg4p7eJYFk,49 +astropy/io/ascii/tests/t/bad.txt,sha256=jst6xQ1h2dE_ImcQzp7tDdfdDJCMNHwxq7JeyX43j2Y,153 +astropy/io/ascii/tests/t/bars_at_ends.txt,sha256=aU4_ZBLKI5DnEOiiWBOu7SvtvCDKgteVFNqE0qpr7G0,253 +astropy/io/ascii/tests/t/cds.dat,sha256=aRfKYW47eAFGRY78b3Q9Uiw0IgoVxP80owXYIkluogc,2433 +astropy/io/ascii/tests/t/cds/description/ReadMe,sha256=1Jgv2hIpeOYa00nL3gx1H5H03uk6fWEbysiIpe1KrY4,3649 +astropy/io/ascii/tests/t/cds/description/table.dat,sha256=RkEOnYoaDh-dmz5bQJ10N7Wn063C5YfC1ta7pYwF3lE,114 +astropy/io/ascii/tests/t/cds/glob/ReadMe,sha256=SG9CfBVPaQ5DvIYHv4mtUDm9ks1KC9NmGvD_h2p6snY,31440 +astropy/io/ascii/tests/t/cds/glob/lmxbrefs.dat,sha256=PcSGuQBKM808MKcYct43rgyLnSaqciXRDueVakL0Ihk,32040 +astropy/io/ascii/tests/t/cds/multi/ReadMe,sha256=RvuuGh_ub8Dvj6-36CDCMZLkG718ylAGy9HdpTDuCFg,3395 +astropy/io/ascii/tests/t/cds/multi/lhs2065.dat,sha256=dTJdHu-ajmDybJvOPLOdLfxsLyfLMnXsHW4f-Kc7RJY,432 +astropy/io/ascii/tests/t/cds/multi/lp944-20.dat,sha256=rKTSBaHkS2k4uLgWS1XyWhgSwDj6gufe5AFKIsJdRHg,434 +astropy/io/ascii/tests/t/cds2.dat,sha256=WMxLXu4tBl4oRvMg8lvu2uEsXo_6Q8Fk11TLOr_Xok0,42714 +astropy/io/ascii/tests/t/cds_malformed.dat,sha256=q54LilMMCr48w3Ghzhah2CAEdp5gA3x6JaJDOb1Tqx0,2352 +astropy/io/ascii/tests/t/commented_header.dat,sha256=YB7hqkoutRF3V971C726GAURbGogDsyZShSREeKeXOI,37 +astropy/io/ascii/tests/t/commented_header2.dat,sha256=YTZmhncoIrmHoTP22dL94PRh2mJMw2LdYd1kbYTG0LQ,60 +astropy/io/ascii/tests/t/continuation.dat,sha256=LBEkswzRG8j7HrpsUArRslvRjMyfuppXXG3xHJD71hI,38 +astropy/io/ascii/tests/t/daophot.dat,sha256=xIk5tcLTYDwXPpk4DluEkoJ_zuH-tY-RiVIIE2XSsz4,1923 +astropy/io/ascii/tests/t/daophot.dat.gz,sha256=oMpmMBN7U_6HTKQzau_0s4nvC-tOnNi6cFL0U9sL2hg,793 +astropy/io/ascii/tests/t/daophot2.dat,sha256=z74wewCKrAuaOH9Dwo-I4Rb-c3GA4jNqhE4r8OsHGog,2036 +astropy/io/ascii/tests/t/daophot3.dat,sha256=IKyVJ1R6_PiPVLZbZvevxuqKJrghxr-aE2kxGCdSPiM,7920 +astropy/io/ascii/tests/t/daophot4.dat,sha256=y8oC2Tbdoe0tBNWGb-BW3g9G7tAJkCFebS_n3mdpASE,7353 +astropy/io/ascii/tests/t/fill_values.txt,sha256=3U8hdH06LreDnq_UiGqYiOezcB2dny3o0xfF9CPhUSA,18 +astropy/io/ascii/tests/t/fixed_width_2_line.txt,sha256=-ytHbhrsHSAmbPrkDfpPTuFtyB9taWBHJqOcscjzkkk,100 +astropy/io/ascii/tests/t/html.html,sha256=Y_h7SszOL73tMjICafMx_GWF9dE4EIHDp5Bsl9oUXAU,794 +astropy/io/ascii/tests/t/html2.html,sha256=mueWMZaMO8d-EPAFVACiW_8IiK7GE5b7AX8WhudXz4k,378 
+astropy/io/ascii/tests/t/ipac.dat,sha256=ir5TyCiGCi1-1R-SzgL7AJcR7lRpLVMxqhat1fEzJCY,552 +astropy/io/ascii/tests/t/ipac.dat.bz2,sha256=o9j--1EoMdmFQiLBq_a0v9syQiWnYMLqDd93u1vABOA,385 +astropy/io/ascii/tests/t/ipac.dat.xz,sha256=35f2GVs0hYeJ3Ke7wlO0S9ZR1PwG5KHokc8qMG9__0M,320 +astropy/io/ascii/tests/t/latex1.tex,sha256=A65ggO9hIOZdBwLVs2q7laFwVzj0guY-fU4kiFmlsXY,252 +astropy/io/ascii/tests/t/latex1.tex.gz,sha256=5AvB9MQ6fXhfOY4MNK2tRYJpAO4OmH3TaXVUfg5tcFE,198 +astropy/io/ascii/tests/t/latex2.tex,sha256=mWdE24-1ccgQB5AgsztlmHwF85x2Rf3jZcVQImBgY8M,426 +astropy/io/ascii/tests/t/latex3.tex,sha256=aYaFRT-zFip8VrQy4NHAua7PZDzzNfskdBgBMjCp5rw,100 +astropy/io/ascii/tests/t/nls1_stackinfo.dbout,sha256=nVu_XgDHthRHOoSn0ugV-eRfyXRgvLOmTHHllR1m8UI,20440 +astropy/io/ascii/tests/t/no_data_cds.dat,sha256=VTc5tMFQFcbnbX_Qv5SNTyftpfaN2a3wgocJjTdXzDw,2345 +astropy/io/ascii/tests/t/no_data_daophot.dat,sha256=fJw6YKq8VulJmeJr3uXqyu5Kn0IfM3-RWV417Vt_KNw,545 +astropy/io/ascii/tests/t/no_data_ipac.dat,sha256=oTC4_bIv7_D0elydC8Fp1VMJEWVHw_mBxuBUaNHGucA,517 +astropy/io/ascii/tests/t/no_data_sextractor.dat,sha256=COrav-qGt5q8gTFCtY6CNjNKgIqRJ5UgCUGClWjl3p4,122 +astropy/io/ascii/tests/t/no_data_with_header.dat,sha256=GiWVNGWrZx1UswEIqZUbVQD6QJlAmO-UY4UwBNp5M-E,6 +astropy/io/ascii/tests/t/no_data_without_header.dat,sha256=kX-OOrSYuQd2csSjG7p6pXBnIXiv0kpLE0s--3usW9k,21 +astropy/io/ascii/tests/t/sextractor.dat,sha256=6QCQpjR02CeaT2NndWZA8hDBXzNEDmeuP_RN7AhVrHE,369 +astropy/io/ascii/tests/t/sextractor2.dat,sha256=QmetTMOfWvWRRFFOhIXVAd9OeRaE6i8D6NQ1dslVM2Y,739 +astropy/io/ascii/tests/t/sextractor3.dat,sha256=JuYXAn_1_WcMqNMKd5sOVix2jBRk8Mxc2Mnbvx2NqIA,1110 +astropy/io/ascii/tests/t/short.rdb,sha256=m2l50eJ_rrQ2aXmwqCEBoMz_CvY4YNsvIC9uOlZ5qEU,156 +astropy/io/ascii/tests/t/short.rdb.bz2,sha256=U6JrvY2O4Icq7idFu2DrqdRCjCCNOm4hVslo2VJvub4,146 +astropy/io/ascii/tests/t/short.rdb.gz,sha256=086Fg1H3Q3BQhLhfGBJAw_MKQ7iYvzSoVkrWQYrWwhc,148 +astropy/io/ascii/tests/t/short.rdb.xz,sha256=MIEQmLpqdVF1Ym3LlV3wh5KLHyoQ6-pW11_2KCur2S4,192 +astropy/io/ascii/tests/t/short.tab,sha256=Y1Zgl8e9thUqJ66PHIM-pQvdr7YpQoVLXeVptxLmgl8,122 +astropy/io/ascii/tests/t/simple.txt,sha256=j5jdCVqjGV-uFZ5oW0W93seCtz97FzFKhGJBGqM7hKo,125 +astropy/io/ascii/tests/t/simple2.txt,sha256=xi37aas-4XMNExgXclcDfiqPk344oeHzbmeFRBtTmHE,245 +astropy/io/ascii/tests/t/simple3.txt,sha256=9FYVYXVR5zGPtCbO0IUwp5QkJR8Lzn1yMfX7l1aiX7M,100 +astropy/io/ascii/tests/t/simple4.txt,sha256=_ohZxWXS28JJPTYDNfpeJa9XoLLiJn0jZuQpuTGzkVw,184 +astropy/io/ascii/tests/t/simple5.txt,sha256=e1Mo63Vn_XLI4o0tmWt73j7-IUbtsddib8CUTzHcNug,239 +astropy/io/ascii/tests/t/simple_csv.csv,sha256=wCwoEHcYGv3sczqgrPgFPQFVKEXku0qBMoeuf0hLjJw,17 +astropy/io/ascii/tests/t/simple_csv_missing.csv,sha256=f9ousWSyjLxQkmRSFGP7fygQoAHcKpzNyZxctgk4Ofc,14 +astropy/io/ascii/tests/t/space_delim_blank_lines.txt,sha256=jXHFWtMA2EF7ozN1JG0ARWW-UHRKY5MHZlmYw6sMqH8,235 +astropy/io/ascii/tests/t/space_delim_no_header.dat,sha256=4DA0RA4bSAH9fyBrEA_zHJswg-Ud86YcwxEUQjSrvnw,24 +astropy/io/ascii/tests/t/space_delim_no_names.dat,sha256=hx1uf0Y6QsVXMgPFDbblLaUG1Tla9z4JI3QQ3sXv-FY,8 +astropy/io/ascii/tests/t/test4.dat,sha256=1W8QlLQzvXcwNXj0Xlx2_YhVz1-Du_n_jteIM_LznoE,736 +astropy/io/ascii/tests/t/test5.dat,sha256=Y4mFF4oRw3msPjmlQdCWUiG5afhPIOVPVbDWgFnJkxM,1471 +astropy/io/ascii/tests/t/vizier/ReadMe,sha256=8BQ6y9nrtDk37cv6qM-nDHdEF8ZLk1JPpSDhW5FdleU,4905 +astropy/io/ascii/tests/t/vizier/table1.dat,sha256=yhaEo3V-Ju9sNNDW_ZIO4eGrh5G2wGXeH3EI3V74oVc,1457 
+astropy/io/ascii/tests/t/vizier/table5.dat,sha256=uRsmUbTgIgNlIBIZhluCAQw3Gx2r1COCT0F79J35kqQ,2793 +astropy/io/ascii/tests/t/vots_spec.dat,sha256=nuvznlIEcLLXFNnQGQPCnOmetnUbMMdGx4scQiB8CQU,5945 +astropy/io/ascii/tests/t/whitespace.dat,sha256=25uy9_U7pqqsbdeKSl0ST4IXeQYnfeULkBCK8dth00A,110 +astropy/io/ascii/tests/test_c_reader.py,sha256=AIwsaGGXuLDYbBLpofpJytxnAcVxxp9LHrvaARWKApI,42680 +astropy/io/ascii/tests/test_c_reader.pyc,, +astropy/io/ascii/tests/test_cds_header_from_readme.py,sha256=CNcPvcd14V0sS34pYAUtsnon_z7RSAKDw-L1x-DkKdA,4273 +astropy/io/ascii/tests/test_cds_header_from_readme.pyc,, +astropy/io/ascii/tests/test_compressed.py,sha256=xUqlu0yD8tmzD22qBTvyxK-Aje2uTfB6Aamm4SnZYtA,1625 +astropy/io/ascii/tests/test_compressed.pyc,, +astropy/io/ascii/tests/test_connect.py,sha256=Fkc2FKu-oTvaRrTMCjzybNB-bml6EUPGokaa-U7DDhA,3608 +astropy/io/ascii/tests/test_connect.pyc,, +astropy/io/ascii/tests/test_ecsv.py,sha256=3yPJAWgOngrqaUfC8h0QzWQD6rIkIvDWhAjVbUxM21w,13456 +astropy/io/ascii/tests/test_ecsv.pyc,, +astropy/io/ascii/tests/test_fixedwidth.py,sha256=et9tkNrvimk10anyZS1LpT4VoGnwzJki7Fdghm6oT-Q,15398 +astropy/io/ascii/tests/test_fixedwidth.pyc,, +astropy/io/ascii/tests/test_html.py,sha256=WxgANL7MuWX06i7RiorhPKGTjkO3WryZ_iz8PAAHIuk,22352 +astropy/io/ascii/tests/test_html.pyc,, +astropy/io/ascii/tests/test_ipac_definitions.py,sha256=JXgOFzv1AmbhGKZAeS_0nxpaamXuhDCVnZUoZUZ53rI,4170 +astropy/io/ascii/tests/test_ipac_definitions.pyc,, +astropy/io/ascii/tests/test_read.py,sha256=HrT2Z8-ruPmPh8MQCCMMWFHfsI6gZUHp5X8K5mtABZw,48917 +astropy/io/ascii/tests/test_read.pyc,, +astropy/io/ascii/tests/test_rst.py,sha256=yKe4ExJmqYKK73yWJ3ii1mWEQkyBgs253_c-gqCB4Hk,5054 +astropy/io/ascii/tests/test_rst.pyc,, +astropy/io/ascii/tests/test_types.py,sha256=yMnN8DrnYnhcWPH_SZKdL-YZ7VwSp0G05Ys7WNXSNHA,1749 +astropy/io/ascii/tests/test_types.pyc,, +astropy/io/ascii/tests/test_write.py,sha256=9OnOFnXnGOLABZOe7Tx8doPhPha4mj3fmS3AYb1Z8CA,25382 +astropy/io/ascii/tests/test_write.pyc,, +astropy/io/ascii/ui.py,sha256=Hx8vmPlCelMSHcj0iLj1Kzd65i6wF04wVr92byN_W2A,31634 +astropy/io/ascii/ui.pyc,, +astropy/io/fits/__init__.py,sha256=CPZuH_w6zDSm3pnvQM_aEsxmqEUR90Hmq16JheepI4Q,3317 +astropy/io/fits/__init__.pyc,, +astropy/io/fits/_numpy_hacks.py,sha256=1ZYtC_zSwgNun26b_gEZXyBOd4OgqnCoPI2xIwg94fM,2769 +astropy/io/fits/_numpy_hacks.pyc,, +astropy/io/fits/card.py,sha256=cQOugeHx-x8Hf1LfPkHSRDn255DaHTQ64VMP_URSOks,48497 +astropy/io/fits/card.pyc,, +astropy/io/fits/column.py,sha256=gHC4DEEN4oY3D0t7QRxF9BKSzqkm0eFykdTeUoqAKuU,83751 +astropy/io/fits/column.pyc,, +astropy/io/fits/compression.so,sha256=SEbunewQxJm8qbzmZXr6GfMbTBvTVJCNu5arq9aDqQI,4592120 +astropy/io/fits/connect.py,sha256=tzBdo4uOxKUayZ0-8pKB3F9zSdmaFkXAgvMnT_7Ee9I,6613 +astropy/io/fits/connect.pyc,, +astropy/io/fits/convenience.py,sha256=DmYoKhXEa2wwbmeECwYM67BF_eisOWhJGywT4-FXx7A,38108 +astropy/io/fits/convenience.pyc,, +astropy/io/fits/diff.py,sha256=E4AlEQ2bHX_eimUJbvnyRxZx7x_K7aC1EWqSq_dWPKM,64028 +astropy/io/fits/diff.pyc,, +astropy/io/fits/file.py,sha256=-BAbxPIhWTEunq9y1woYARDflHoY4W-BliXzAKS2vKI,22023 +astropy/io/fits/file.pyc,, +astropy/io/fits/fitsrec.py,sha256=qYm7-T81-ERsm2Y0231nrtEimrLFKQu6APpOfFMXvy0,54060 +astropy/io/fits/fitsrec.pyc,, +astropy/io/fits/hdu/__init__.py,sha256=ZQSBb14xDUqt8fjFQFxI31PIh1pIFYUbLnZ3UwmSnAk,697 +astropy/io/fits/hdu/__init__.pyc,, +astropy/io/fits/hdu/base.py,sha256=OeRglEQrWmhY48F9qKu8mlid7m43Bm8SZHCWqxgMMec,61327 +astropy/io/fits/hdu/base.pyc,, 
+astropy/io/fits/hdu/compressed.py,sha256=eLhrDfre_o1U5QBXMY8ERGQnJdbNrFZ_RU1FFCWnbuw,85395 +astropy/io/fits/hdu/compressed.pyc,, +astropy/io/fits/hdu/groups.py,sha256=ieoqe4SJiVCEVE8AimBJKxbDMTMWsKL1UJ9w1HzJCT8,21592 +astropy/io/fits/hdu/groups.pyc,, +astropy/io/fits/hdu/hdulist.py,sha256=CpsRFRco1Kbs7kpAGGzrVbMVs3YJZoA7pOqL40XEOG4,52045 +astropy/io/fits/hdu/hdulist.pyc,, +astropy/io/fits/hdu/image.py,sha256=EBVS5prWX2OR2XGdBvxCMSXJxScLlNGkjo51xfMys_U,44114 +astropy/io/fits/hdu/image.pyc,, +astropy/io/fits/hdu/nonstandard.py,sha256=ZgXplZLBCBxSWdvvbH2zUGgIhpPpTkwTcXgtvTDDB8Y,4062 +astropy/io/fits/hdu/nonstandard.pyc,, +astropy/io/fits/hdu/streaming.py,sha256=USEXOMl1OOHvepaurAxfTzLhJUlYXB3v8lpDVlHKl8Q,7711 +astropy/io/fits/hdu/streaming.pyc,, +astropy/io/fits/hdu/table.py,sha256=SGlDrBrb8xryqYcR04EZE6NIAYGjyDmeLKceVJ6hFao,57174 +astropy/io/fits/hdu/table.pyc,, +astropy/io/fits/header.py,sha256=e14YdSNPSt7sEKj2iXIK7HizWB11wQPQQcQPDhotpPg,74539 +astropy/io/fits/header.pyc,, +astropy/io/fits/py3compat.py,sha256=3xBMKybypzDdVTaR0kUixga-rI1UzBbvVGGaW608RNE,3712 +astropy/io/fits/py3compat.pyc,, +astropy/io/fits/scripts/__init__.py,sha256=OR90Qv2NMW3fdn4Kcg9c5NkppPrvFxsq8vCPg-gwW0c,291 +astropy/io/fits/scripts/__init__.pyc,, +astropy/io/fits/scripts/fitscheck.py,sha256=wXKjskAUDfXmjAR8WUKGhFBy6PLt_3o_8s63ghiBtEY,6838 +astropy/io/fits/scripts/fitscheck.pyc,, +astropy/io/fits/scripts/fitsdiff.py,sha256=R9Tlc1AVQyhGhV9-tSSbAM8_QKZzPCesSZmzWh_4W0c,12926 +astropy/io/fits/scripts/fitsdiff.pyc,, +astropy/io/fits/scripts/fitsheader.py,sha256=Wy8p9WR4vsxaRwWNANLsn9ERL3lceVOPsXL7D4JoWOA,12553 +astropy/io/fits/scripts/fitsheader.pyc,, +astropy/io/fits/scripts/fitsinfo.py,sha256=SnvCfo94vBECr2Ep1cSlVxa3ti7oWLfKFBDF2A7l9zo,1784 +astropy/io/fits/scripts/fitsinfo.pyc,, +astropy/io/fits/setup_package.py,sha256=QocjhAQbQSAVC_Hn3bVgZ6HPQGUSHzPVLGbREumsPMc,2958 +astropy/io/fits/setup_package.pyc,, +astropy/io/fits/tests/__init__.py,sha256=7yoanMZxkp3sDh7WTMRPPa-WYlTSrq8HmvIiNn-bErE,2069 +astropy/io/fits/tests/__init__.pyc,, +astropy/io/fits/tests/data/arange.fits,sha256=Fc2vcpojV6h8bPCslpzvMM6-gw7tf8ZtzmfzYkbK5kg,8640 +astropy/io/fits/tests/data/ascii.fits,sha256=s9XnnFz5hNjyIhLatPpe9eRvQ8A5CwCPTYN01_P-OkI,8640 +astropy/io/fits/tests/data/blank.fits,sha256=2XkZNoWQwnBvMzdcCzmzeH0_L7j73p9hOYcH_kp_P-s,5760 +astropy/io/fits/tests/data/btable.fits,sha256=UU73L2JZ5W01pTJDvARSPE0nUzpJG9EteyS7aiov_n0,8640 +astropy/io/fits/tests/data/checksum.fits,sha256=gKbt25uaC2LryAX15cmdx1GMZuIKUmacve07rbDRMKU,20160 +astropy/io/fits/tests/data/comp.fits,sha256=vGdb13XNUmuknpceUlfDAod-_MOqiamok8EPQ7DOUlo,86400 +astropy/io/fits/tests/data/compressed_float_bzero.fits,sha256=PEO0Sz8ag4X3Dj7wWdik-nBJLgNN0M0uVzO-uaNeQnU,8640 +astropy/io/fits/tests/data/compressed_image.fits,sha256=3AjX_Sa469pdt9n709yImZn6zpX75Cr2pvQYc10-BP4,8640 +astropy/io/fits/tests/data/fixed-1890.fits,sha256=aWQZK71MwVSFxbEyVdWO3iLGFLiZPJm6TNFLCS1Qz4Q,31680 +astropy/io/fits/tests/data/group.fits,sha256=OvLlW0kNRCx9cPeMb6J3evx6_bEnyEHYgY1X17GprXI,5760 +astropy/io/fits/tests/data/history_header.fits,sha256=0KRt2J7hizrkhsoupkmTI3ZHAMTxcvKMB0YNYJH7Sz8,2880 +astropy/io/fits/tests/data/memtest.fits,sha256=XJgG7O2DFc9-s-jYeDuDHvVk79rWV8feg6ZGvTHYyaQ,31680 +astropy/io/fits/tests/data/o4sp040b0_raw.fits,sha256=255ISTsiYnYGT-HTPxxgAl7UZqp0UWVy8gcX0o9wGFs,74880 +astropy/io/fits/tests/data/random_groups.fits,sha256=BMdOwhCH033IPhBkEXr2Knn86T1sRQFiM2dtBHm34T8,20160 +astropy/io/fits/tests/data/scale.fits,sha256=0V7Q4Vh99b_CeS7tJF6Oe-PpDUArAaZAw3p4-kMDmC0,8640 
+astropy/io/fits/tests/data/stddata.fits,sha256=2TdoFuJDBUR7eio0Yx8jvkeqAECRUN7ppfUIDsIbGgg,23040 +astropy/io/fits/tests/data/table.fits,sha256=6Gvd5QXvwP60b9MuJCG45vpnsd2WObJJ3QCTy1FMXMo,8640 +astropy/io/fits/tests/data/tb.fits,sha256=4YkaVFOvLMyhaho9g8lnhepij8ZZpFPfhMkNhTnJ8Kc,8640 +astropy/io/fits/tests/data/tdim.fits,sha256=QOhVGXpAUmMk2anTjcucoNz544Y6nYhwxubgvdwgSBA,8640 +astropy/io/fits/tests/data/test0.fits,sha256=6gbuMLKPHqLoymLFKJdWdjt_QTVtf6MpHbw0bi7TTpQ,57600 +astropy/io/fits/tests/data/variable_length_table.fits,sha256=ZanMGZ_2yZGOl-u_5pVKWfcMq0ctLxNJYKiVdQI-lO0,8640 +astropy/io/fits/tests/data/zerowidth.fits,sha256=DV2p9yTiZBvIHM5rTJwcK6oMZP62p0NEsZvhDx3Bj_Q,54720 +astropy/io/fits/tests/test_checksum.py,sha256=Ycw9p1ZijMT-Dhc8jYli7L_yJ3_tQH9zrktF2BHB6Ks,20782 +astropy/io/fits/tests/test_checksum.pyc,, +astropy/io/fits/tests/test_connect.py,sha256=xxJ3KwGp4fxhe7nPUxMEiQ8s2a-KxtBqqedDO2aD8wk,12604 +astropy/io/fits/tests/test_connect.pyc,, +astropy/io/fits/tests/test_convenience.py,sha256=dbEKQt--otaRh9pLSMszCYPhjglV2x01CHfrQAcWJ8o,5642 +astropy/io/fits/tests/test_convenience.pyc,, +astropy/io/fits/tests/test_core.py,sha256=7L-qcNEwzzZ-sdA3qFyUKnIgHM3ifdCzWwjg_Q8KWbY,42946 +astropy/io/fits/tests/test_core.pyc,, +astropy/io/fits/tests/test_diff.py,sha256=pbCNCJr5iBYH28KsKBd6z5vcRlm351sENEr6NB5qLas,31202 +astropy/io/fits/tests/test_diff.pyc,, +astropy/io/fits/tests/test_division.py,sha256=JlS0J04C1Sk2wLCGx7mmW0ZpUkgS6tWz836hznFxZ88,1303 +astropy/io/fits/tests/test_division.pyc,, +astropy/io/fits/tests/test_fitsdiff.py,sha256=PV98CQoSs8BuDuTtmjB2rg1oVu-L2aQXpuicWHPsDZ4,8768 +astropy/io/fits/tests/test_fitsdiff.pyc,, +astropy/io/fits/tests/test_fitsheader.py,sha256=T0axiPSpappnzGRoH4VX-jVE2QQL6B0hnkSPPN3un_Q,3075 +astropy/io/fits/tests/test_fitsheader.pyc,, +astropy/io/fits/tests/test_fitsinfo.py,sha256=jO-9jeEyVBOs_GOahAJCtDqxoJfQQe0-z82gyxskIFg,1170 +astropy/io/fits/tests/test_fitsinfo.pyc,, +astropy/io/fits/tests/test_groups.py,sha256=mQX3REcEx7z4jJa2WoUCepZ3IrWpoPnidXdFPqwUs2w,7931 +astropy/io/fits/tests/test_groups.pyc,, +astropy/io/fits/tests/test_hdulist.py,sha256=7wi29nZQGxdpodgric4Ba5gKXXbUv7m1dKXqGFXVHgo,37031 +astropy/io/fits/tests/test_hdulist.pyc,, +astropy/io/fits/tests/test_header.py,sha256=uvsB3S2C2cy11MR9TnhOtzj_Hly3xBmou0fm76YxhJ0,104395 +astropy/io/fits/tests/test_header.pyc,, +astropy/io/fits/tests/test_image.py,sha256=splYmxWEJm1iJnN5TuO7jmODcqa1Cj5j41a6SDuUt_M,78598 +astropy/io/fits/tests/test_image.pyc,, +astropy/io/fits/tests/test_nonstandard.py,sha256=9Oglkec8A0sugWpPtxwkeKWP_1y8yHWn3VFsIlds0i4,2294 +astropy/io/fits/tests/test_nonstandard.pyc,, +astropy/io/fits/tests/test_structured.py,sha256=QadmQOkoEUNMN-iggT8qXnZEFPa5b-VmXSy_8XYDLxM,3170 +astropy/io/fits/tests/test_structured.pyc,, +astropy/io/fits/tests/test_table.py,sha256=AwPRnOKS-aqC0UV9Us1fenQ4NG90BHon70nY-4OyE-w,124290 +astropy/io/fits/tests/test_table.pyc,, +astropy/io/fits/tests/test_uint.py,sha256=vkxvSFrlseYE2ookDu6RLi-WUlZqUnHg5ZnCoIffu4M,4920 +astropy/io/fits/tests/test_uint.pyc,, +astropy/io/fits/tests/test_util.py,sha256=TEUdE24Gpj3krvj5XCSgZp1n8-pQ-XDOym0hTYqCvpA,6702 +astropy/io/fits/tests/test_util.pyc,, +astropy/io/fits/util.py,sha256=N6-5-po1U7lT6sPOs-WGA-3fO_WiI7bGHJZ03KcYPW4,28177 +astropy/io/fits/util.pyc,, +astropy/io/fits/verify.py,sha256=lGt-ebzU8KwaKXajxGAjY589UjanP0trMxe_V7uANbs,5728 +astropy/io/fits/verify.pyc,, +astropy/io/misc/__init__.py,sha256=uPQSNNeufHvCTjnGbu8JqXOB_onW9zEC1jRdsCc_9bs,193 +astropy/io/misc/__init__.pyc,, 
+astropy/io/misc/connect.py,sha256=e2_5H2SHnCDdOW8DIxM17oS1EsdhN3HT7QUNn2e3oyk,311 +astropy/io/misc/connect.pyc,, +astropy/io/misc/hdf5.py,sha256=qQVQTkhhlxw3LtLeYBxU-P1sottCPFs5nhF_DCTrqQg,10407 +astropy/io/misc/hdf5.pyc,, +astropy/io/misc/pickle_helpers.py,sha256=DBJUrvaMdpQ9XxYiVj42ffFZ0HE34PR3aF5oYnvet-0,3779 +astropy/io/misc/pickle_helpers.pyc,, +astropy/io/misc/tests/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/io/misc/tests/__init__.pyc,, +astropy/io/misc/tests/test_hdf5.py,sha256=iGT2zXacUwN8qfT0ISXYDUAnvFMoD1DQFkm-F7vRcNk,14278 +astropy/io/misc/tests/test_hdf5.pyc,, +astropy/io/misc/tests/test_pickle_helpers.py,sha256=BxFNVPkf_87WS85bY7he0TkcuJvI1SzXQumg6Wf_msI,2663 +astropy/io/misc/tests/test_pickle_helpers.pyc,, +astropy/io/misc/tests/test_yaml.py,sha256=4rWEZmsoGJw9ifr_Udl41iznTiEw2hPY8RVdSvjHKSQ,5459 +astropy/io/misc/tests/test_yaml.pyc,, +astropy/io/misc/yaml.py,sha256=BVa8grhrX0_aoDnL2FS67ahKJ2zRW4QGmDtTiuZCmaw,11394 +astropy/io/misc/yaml.pyc,, +astropy/io/registry.py,sha256=ZjDy3LHXL88-N-WEkQrrIUEGZUxZ8Z-XeSf9Trz82j4,21426 +astropy/io/registry.pyc,, +astropy/io/setup_package.py,sha256=82VuCVSU5qR1xNbgTtuQ-m9p-q3dpRAP499jMIfi3Hs,104 +astropy/io/setup_package.pyc,, +astropy/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/io/tests/__init__.pyc,, +astropy/io/tests/test_registry.py,sha256=Xt9FMKvJZgQJdElMKzHyq1JqdgUf-4BG96mTz4AEE10,14406 +astropy/io/tests/test_registry.pyc,, +astropy/io/votable/__init__.py,sha256=oB50bd8uK6iy2kKAfnfZwGASHgseMhhfL2cMTSSG1WY,1110 +astropy/io/votable/__init__.pyc,, +astropy/io/votable/connect.py,sha256=48TDEq27l3BLo49kEkALxv6rwW8-lyV4BY8hLR57Kac,5942 +astropy/io/votable/connect.pyc,, +astropy/io/votable/converters.py,sha256=zkAuNeWu8Hdrsj-oeYDoDjzK00WZXa4brlIYj9c7Zyc,42588 +astropy/io/votable/converters.pyc,, +astropy/io/votable/data/VOTable.dtd,sha256=SUApDh5tTZgPBHblHZ_n-1bu0nbYCk3KKCbeHOWkMxA,4743 +astropy/io/votable/data/VOTable.v1.1.xsd,sha256=g8xRsjughA8Z5Su7R3IsluMcXIlpQ_yubHB5NrOaBcU,16813 +astropy/io/votable/data/VOTable.v1.2.xsd,sha256=1nNhe-stXR79eS4iienIQ8XOkHRjsKkhPuiRoGd8410,21521 +astropy/io/votable/data/VOTable.v1.3.xsd,sha256=u2B4-3YJsCcVnGk_-jAOv6x0dsP01DijP-yzZL2NrZg,21829 +astropy/io/votable/data/ucd1p-words.txt,sha256=12dvZQcew-CJQjoM5FwDrW8gyhYMjzGijsDNRDgf1T4,31252 +astropy/io/votable/exceptions.py,sha256=20BsWISdL54z_FHB0wujJW-y8gmCjwYDX4t8Pm6iTD8,46463 +astropy/io/votable/exceptions.pyc,, +astropy/io/votable/setup_package.py,sha256=uKgxcY9xtYGkdHEiAbeG48VaW6p_JZO-eT4zcWWoeNY,769 +astropy/io/votable/setup_package.pyc,, +astropy/io/votable/table.py,sha256=M_7Cs9CINl5Lb4y8AQf1ZvgATO94e82mp51ZwmZRPWg,12769 +astropy/io/votable/table.pyc,, +astropy/io/votable/tablewriter.so,sha256=gX4VCa5aRx6aBz9Otfd6H35Dof9oDJCImzgJacjGrt0,41792 +astropy/io/votable/tests/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/io/votable/tests/__init__.pyc,, +astropy/io/votable/tests/converter_test.py,sha256=pWJL6mrn2-1o0hpdu5qAZOu_qAGU9btgNLNHeE1gl7M,7559 +astropy/io/votable/tests/converter_test.pyc,, +astropy/io/votable/tests/data/custom_datatype.xml,sha256=LxhKZMd_1rUCOgHcZdw4EeI3I_FmnoLpcNfXGUkEcEg,475 +astropy/io/votable/tests/data/empty_table.xml,sha256=QzqVRXD8vV6q7lA59lJaOHwzQxfWxrSvP20XPp_WxTs,444 +astropy/io/votable/tests/data/gemini.xml,sha256=ZpEsBEQoDiVLwctVILQtw4w8CKnNiBnGaxziDolcZb0,9440 +astropy/io/votable/tests/data/irsa-nph-error.xml,sha256=-7_dbaET5UdlDmCfELBHYy5TUjfQxhe0E7G8PwNmSmE,213 
+astropy/io/votable/tests/data/irsa-nph-m31.xml,sha256=Pl_PyICrWzVBT2Xy2bfrfMcf51YuFaNb92AGX78B65k,9432 +astropy/io/votable/tests/data/names.xml,sha256=lColivRHvR7-Yh3Lv5IcsIW3kybzhFwKT6e7WLX6HfA,10472 +astropy/io/votable/tests/data/no_resource.txt,sha256=2Vu0cy1K6pQLsc5rTXe4g5QbOIjbOhHG6YHqJoDYF7M,200 +astropy/io/votable/tests/data/no_resource.xml,sha256=8ausYMQo9seWGFcKi7cYtvLBth9TTTGnZsqHnMEL07g,259 +astropy/io/votable/tests/data/nonstandard_units.xml,sha256=4786FuAOdUgCu-_jLj-uAynlGYEjYP_RPMU4cvEINsM,556 +astropy/io/votable/tests/data/regression.bin.tabledata.truth.1.1.xml,sha256=b3CDrLUsiwkkQegsEQ0DsbOdUj8IhVPdDLSz7UnDO1M,9196 +astropy/io/votable/tests/data/regression.bin.tabledata.truth.1.3.xml,sha256=8_oLbzfQJpUPnIodu0v0vPUNS--BydPlG4zu5gT4aZE,9058 +astropy/io/votable/tests/data/regression.xml,sha256=r3IskyPbHPfXR5d02slWZuQpkieMg-zOyqZYErmL4bA,9335 +astropy/io/votable/tests/data/tb.fits,sha256=4YkaVFOvLMyhaho9g8lnhepij8ZZpFPfhMkNhTnJ8Kc,8640 +astropy/io/votable/tests/data/too_many_columns.xml.gz,sha256=IZWk7d7RsbRDtkmGAQlGVTWtIisoyFxirJMEP007OGg,1734 +astropy/io/votable/tests/data/validation.txt,sha256=6e6krzn8QIUJ8RpewmG3-VKDvK_QUFT2YJl7aAunU4A,5812 +astropy/io/votable/tests/exception_test.py,sha256=IzGHzIAVmYS3vLrHcBlyCwaZ8BbsoMXOtPtdkiBBHmo,1250 +astropy/io/votable/tests/exception_test.pyc,, +astropy/io/votable/tests/table_test.py,sha256=NY0K_mztKdWHjcv6Lxc4FT9xHPuDVtbUDpekcn33I_k,5623 +astropy/io/votable/tests/table_test.pyc,, +astropy/io/votable/tests/tree_test.py,sha256=rO3q9qb_pISsKZ79Grjh2QkXy8eV7FYJHSzV2639irE,806 +astropy/io/votable/tests/tree_test.pyc,, +astropy/io/votable/tests/ucd_test.py,sha256=b6050mIRR7EIklXPwMpzCcFXCyqUB7G-aAxY3iVXKm0,1685 +astropy/io/votable/tests/ucd_test.pyc,, +astropy/io/votable/tests/util_test.py,sha256=gmY3hsEiYwNzAFV9Prl1JVHCb8RnSS-lnxf1vGR-X4Q,1652 +astropy/io/votable/tests/util_test.pyc,, +astropy/io/votable/tests/vo_test.py,sha256=cPAoJHHYJldnkkGpmKL_DnMUouUsU9oZTRB3p_b3ojo,34773 +astropy/io/votable/tests/vo_test.pyc,, +astropy/io/votable/tree.py,sha256=TGhqOG3SNKUSHnpRNJdIQaPrebx2unJN-58lpl5gHPo,123148 +astropy/io/votable/tree.pyc,, +astropy/io/votable/ucd.py,sha256=dC3h3Wh4g6ceAvHbUTvqwHsazalAIRWw490bROKVFAI,5886 +astropy/io/votable/ucd.pyc,, +astropy/io/votable/util.py,sha256=EAQita8bNLm6nAz0EP0sZ_VQpqchN9eJRz4zkfNoiFE,6377 +astropy/io/votable/util.pyc,, +astropy/io/votable/validator/__init__.py,sha256=WyltlIlT0iOSDieThCAigyNBQj2PpMybCWPicw1ksxM,157 +astropy/io/votable/validator/__init__.pyc,, +astropy/io/votable/validator/html.py,sha256=J3jdcXp2LihJ7s3Y-thBLBwNA5nIo5Dq6K_WJHhgjb4,10225 +astropy/io/votable/validator/html.pyc,, +astropy/io/votable/validator/main.py,sha256=dtQqsm9wuB7zZDXDHA7ahcpOs4Pl_vPq0bU9dAPBltE,4892 +astropy/io/votable/validator/main.pyc,, +astropy/io/votable/validator/result.py,sha256=1_mC5U_26Qw0YXTvLrDpNTp84bAxpd9mbz23lVEHIDE,11544 +astropy/io/votable/validator/result.pyc,, +astropy/io/votable/validator/urls/cone.big.dat.gz,sha256=0LS4lHCk_6qCYuQh2Ztt76Bv1hn0vkrm3Mszxi7ietI,168333 +astropy/io/votable/validator/urls/cone.broken.dat.gz,sha256=bZl8fkJU_PadSrgcLF_S3voNKppeinE9p7HsasuEvDs,350 +astropy/io/votable/validator/urls/cone.good.dat.gz,sha256=542ebGfr2MNYqh80ZneRYy2OeHZvq6YJ2wKCvaqf4V8,168334 +astropy/io/votable/validator/urls/cone.incorrect.dat.gz,sha256=HXLwdnB1XImuLG5spuLE85baSIPRBUF8Jsxi4raZyOw,721 +astropy/io/votable/volint.py,sha256=7gZJdPkQzZvlP8hVDt5mm4npSo3GbK_hC7DPClc3qgw,498 +astropy/io/votable/volint.pyc,, +astropy/io/votable/xmlutil.py,sha256=C32UBrtPFNr3NZfTZDy5YnBD9-XdDiMyrdNC0zJmpKw,3655 
+astropy/io/votable/xmlutil.pyc,, +astropy/logger.py,sha256=gn_3hMqYmd72wSZbbxFw5pw0sXwarATwutQkASLPnpk,19912 +astropy/logger.pyc,, +astropy/modeling/__init__.py,sha256=mipgnuDjfH69nQz0NHQLOOr9tSDtOPX-b0PM8VPLEn0,390 +astropy/modeling/__init__.pyc,, +astropy/modeling/_projections.so,sha256=q3kK32x8FmhSM3h_x3GHVGdtv2ua7hFsrDf1kyJ25PA,625096 +astropy/modeling/blackbody.py,sha256=Qs9UFMZZVoKkXkKuHb-DZ1xUEWb7QypGT5MjmY4-hxY,12261 +astropy/modeling/blackbody.pyc,, +astropy/modeling/core.py,sha256=v6Z3eOPepGSEO8XIL9fuAacp-mOhOkiVwYW0Cr-dcxY,131842 +astropy/modeling/core.pyc,, +astropy/modeling/fitting.py,sha256=-KA2BOo3zBQQWB1nGOBOhs4ZJqzVFebTv6Eu_F7KC7o,48807 +astropy/modeling/fitting.pyc,, +astropy/modeling/functional_models.py,sha256=ZISysdIvSTnRZu4nvyMoHezfct7Zm3n1URUdO6yCA7E,74362 +astropy/modeling/functional_models.pyc,, +astropy/modeling/mappings.py,sha256=IMgy6umow4oGZU5lpLPb3E-UIwgz78iXiOrci2Nd1TM,5581 +astropy/modeling/mappings.pyc,, +astropy/modeling/models.py,sha256=Xr4LnVU0YlDmN9qB91DIRSMDsEgQtO3DrCToLu3_wJ8,2598 +astropy/modeling/models.pyc,, +astropy/modeling/optimizers.py,sha256=_SPXh7IQ9leiE7JOMQWQumBWwQbLwlYih_-dy48xqc0,7360 +astropy/modeling/optimizers.pyc,, +astropy/modeling/parameters.py,sha256=VWDAPy7MlR5hOsumKueqWh0LjpGIVJWRWgmscYHjm-A,34050 +astropy/modeling/parameters.pyc,, +astropy/modeling/polynomial.py,sha256=aJaj_nU47e5mua3eQcEgPcSxE9BeiNR4S_cWBQLVg1o,48618 +astropy/modeling/polynomial.pyc,, +astropy/modeling/powerlaws.py,sha256=CpoHyC6cYqVKg1evRmOyPEoRHF8sql3fq9ksWt7lxns,16175 +astropy/modeling/powerlaws.pyc,, +astropy/modeling/projections.py,sha256=hpdkneJdjyLGjLjA3PfmYKk-rS9qu27UhW2a1KIkYOY,53662 +astropy/modeling/projections.pyc,, +astropy/modeling/rotations.py,sha256=r_bcFgSCuBTDzHyLZeweHttjbSWcnPb6N-IwwwcFFu0,13350 +astropy/modeling/rotations.pyc,, +astropy/modeling/setup_package.py,sha256=RNbqLM224J22EBwf_tSr0pU1wacMGwFM5NJbCm1USxI,4615 +astropy/modeling/setup_package.pyc,, +astropy/modeling/statistic.py,sha256=ihQDP7HQR7leNZMp2t9sFJgDAciiNhLKZUr1PLJ2Q0U,1261 +astropy/modeling/statistic.pyc,, +astropy/modeling/tabular.py,sha256=335MqqBEehq_quCvN9LOBRU680SrW1I3bTdoFURxu7U,9476 +astropy/modeling/tabular.pyc,, +astropy/modeling/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/modeling/tests/__init__.pyc,, +astropy/modeling/tests/data/1904-66_AZP.fits,sha256=UdlUUNNctsjGClnnLmk7cSeudgfOzlkFIG9kawpMAkY,161280 +astropy/modeling/tests/data/__init__.py,sha256=M1f7jkPOdRuYfiO4C4DJZszhgDc51-q6Godu3uAPYQg,128 +astropy/modeling/tests/data/__init__.pyc,, +astropy/modeling/tests/data/hst_sip.hdr,sha256=CsWzGHfx5hu827AkZi2g3njjc_Ybqq8qRjYZrpqrza4,3305 +astropy/modeling/tests/data/idcompspec.fits,sha256=JGJZ6Pr3VAuNRxBJvfZblFHD22g8O-8ZI2KxLaBtvog,823 +astropy/modeling/tests/data/irac_sip.hdr,sha256=nXK74F5W0GD2N2x7vr31fvThf1jwyor5qicAGelfo_A,19520 +astropy/modeling/tests/example_models.py,sha256=OvMU6ibfTcYcsU5PLLxQ-squGbJRBIIQrwHyFQ9-Q_4,9891 +astropy/modeling/tests/example_models.pyc,, +astropy/modeling/tests/irafutil.py,sha256=L6EfHwXGmVFJB7HRVuBha30A0NzTt1Nswdzou6rcqeA,7253 +astropy/modeling/tests/irafutil.pyc,, +astropy/modeling/tests/test_blackbody.py,sha256=xx0ioLQZjQOI9ux1qgJPvSAEcwepVlquYJc62Y4tzig,4922 +astropy/modeling/tests/test_blackbody.pyc,, +astropy/modeling/tests/test_compound.py,sha256=PMQ3NWeUzVE-ZGNw6lY3WModAxivkeqqbIVukqpZNF4,29786 +astropy/modeling/tests/test_compound.pyc,, +astropy/modeling/tests/test_constraints.py,sha256=ntlrB8Hiu0YjzmlIICgLLr_268DLzL3Pje9MtWUUeIg,20534 +astropy/modeling/tests/test_constraints.pyc,, 
+astropy/modeling/tests/test_core.py,sha256=osNqWcGh8l8EJYd-ayvbd5NVzij8CiaCRoNAVGam8ew,11241 +astropy/modeling/tests/test_core.pyc,, +astropy/modeling/tests/test_fitters.py,sha256=q3RqvP_Xyy6HGwFoPBUCxvvWwcaZhDdbZ-a20EDJJPU,29587 +astropy/modeling/tests/test_fitters.pyc,, +astropy/modeling/tests/test_functional_models.py,sha256=FH1NSf2tCCSuIMNn2ewt0_IGujsPFcbqri6L7UzFtJc,8950 +astropy/modeling/tests/test_functional_models.pyc,, +astropy/modeling/tests/test_input.py,sha256=Xr0XaaPggQ_iIWeRfOHIip4iYRp9cwshEzrwJ7sQcM0,29834 +astropy/modeling/tests/test_input.pyc,, +astropy/modeling/tests/test_mappings.py,sha256=K4m0NlsJSrywzQVVT5lbOhWnpsYTojQ_roNvOzmQaKU,2655 +astropy/modeling/tests/test_mappings.pyc,, +astropy/modeling/tests/test_models.py,sha256=Baqmi-mldkW0Y0zdtixlYtzzOSNM2eCgJ5_08yfOg18,21744 +astropy/modeling/tests/test_models.pyc,, +astropy/modeling/tests/test_models_quantities.py,sha256=5H1Kkb5k9PFTdrTbriXP3UqITWFJlz2u8HPuALTXOkQ,13322 +astropy/modeling/tests/test_models_quantities.pyc,, +astropy/modeling/tests/test_parameters.py,sha256=P3ugePv3FMHknNeJbCg99rBve6nQyrOhbiZwBBqqW0A,21475 +astropy/modeling/tests/test_parameters.pyc,, +astropy/modeling/tests/test_polynomial.py,sha256=seDnN1z4AMRl5o8wdWCfnr5I5HhKryyp_qp0CVDGfS4,13167 +astropy/modeling/tests/test_polynomial.pyc,, +astropy/modeling/tests/test_projections.py,sha256=veV1kNcFtWLK59EcWjUsq63MITgWo-IdPJuooGLZZNk,10454 +astropy/modeling/tests/test_projections.pyc,, +astropy/modeling/tests/test_quantities_evaluation.py,sha256=NbPUlbvG4obcVhcdrnXoMQSJD2r0TipFownSo80o0LU,6690 +astropy/modeling/tests/test_quantities_evaluation.pyc,, +astropy/modeling/tests/test_quantities_fitting.py,sha256=Lqkm7hXpZ9_u1hQZJpYkxRyfqll_i5omGFDcDaeTZKc,4274 +astropy/modeling/tests/test_quantities_fitting.pyc,, +astropy/modeling/tests/test_quantities_model.py,sha256=yR0nPg_JHnoQflNQUmBgNw8ynazGJK7oeyRewkGpL9E,1053 +astropy/modeling/tests/test_quantities_model.pyc,, +astropy/modeling/tests/test_quantities_parameters.py,sha256=354zyUNZJZqkoAcbMPQEZyJZjNDcgCJl8YkUfgsfcB8,12018 +astropy/modeling/tests/test_quantities_parameters.pyc,, +astropy/modeling/tests/test_quantities_rotations.py,sha256=mUdlvRa8uYFMQ8aqQr26IsJW_tHmrpneAn-8RAnTSns,4140 +astropy/modeling/tests/test_quantities_rotations.pyc,, +astropy/modeling/tests/test_rotations.py,sha256=VgeV4vZvnq8xz6rVTQEdJpAVKUbvfBjyN20dbjPPNMs,4499 +astropy/modeling/tests/test_rotations.pyc,, +astropy/modeling/tests/test_utils.py,sha256=HZd2TMiSzZ38vifDLgwg4P8ilYZc7rRuIm0DwIspry8,3177 +astropy/modeling/tests/test_utils.pyc,, +astropy/modeling/tests/utils.py,sha256=mVyoX_qGI012iNmUURIoF1U_M-PpQdCxC80nBpcL6gk,669 +astropy/modeling/tests/utils.pyc,, +astropy/modeling/utils.py,sha256=P8NiBph0N7LQIiseQ7iSehP59yC7Daho6lVWE29d3jg,21197 +astropy/modeling/utils.pyc,, +astropy/nddata/__init__.py,sha256=BFtkhC87t1WVT1G5_fHmSCfHLp7W_ZQPu-hAeMsJIlQ,1476 +astropy/nddata/__init__.pyc,, +astropy/nddata/ccddata.py,sha256=uk5cjMMeF5_pNTiSehRdZ2zNI5uL_AG9ePxZjJ2E5MM,22573 +astropy/nddata/ccddata.pyc,, +astropy/nddata/compat.py,sha256=MYr_6nhnPHM7KzYaoN32fWh8SoxxByBr4JY78IYPSV4,10537 +astropy/nddata/compat.pyc,, +astropy/nddata/decorators.py,sha256=ZzBqg-Cx_xMYt370qScaE9t8xJKSkMr-cxXwcHZWPhk,11893 +astropy/nddata/decorators.pyc,, +astropy/nddata/flag_collection.py,sha256=m4nPYBA4X5YekWNLQbz2VLx1RB8UW7-xyyKU4WV7fy4,1767 +astropy/nddata/flag_collection.pyc,, +astropy/nddata/mixins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/nddata/mixins/__init__.pyc,, 
+astropy/nddata/mixins/ndarithmetic.py,sha256=7EeEBTqMWdwHmjdPxJdBcFUIGYD5gT2OUTUvFVuDujs,24362 +astropy/nddata/mixins/ndarithmetic.pyc,, +astropy/nddata/mixins/ndio.py,sha256=IIVeHe30pqMegQzjWV7Q9MI-DZYUciIZW8w3VLSjPbA,1256 +astropy/nddata/mixins/ndio.pyc,, +astropy/nddata/mixins/ndslicing.py,sha256=ih_uQv5qR_mLjD9wwdotF-M2HtJG4zSpz7X7v1LgUiE,3898 +astropy/nddata/mixins/ndslicing.pyc,, +astropy/nddata/mixins/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/nddata/mixins/tests/__init__.pyc,, +astropy/nddata/mixins/tests/test_ndarithmetic.py,sha256=GUqbpL2iIz5s8hCSWlL-JdaglGIrVIztNPObAljN2f8,30541 +astropy/nddata/mixins/tests/test_ndarithmetic.pyc,, +astropy/nddata/mixins/tests/test_ndio.py,sha256=F66O84fckFnneUaAyit49T_9GV_QQiPDzKZxXYDqd3Y,372 +astropy/nddata/mixins/tests/test_ndio.pyc,, +astropy/nddata/mixins/tests/test_ndslicing.py,sha256=fANSzX0qac6Of7KfD4Clxejm4gV-hRZdbXyQXxFXzoo,5005 +astropy/nddata/mixins/tests/test_ndslicing.pyc,, +astropy/nddata/nddata.py,sha256=i8FUnwo6zBraxfI2Ph_nSqsOnYvbJ5rKv4vPnRA8Npw,11806 +astropy/nddata/nddata.pyc,, +astropy/nddata/nddata_base.py,sha256=fljnsbEmQltK_nOMuEk_Jv-dF7AUvz6i5tErSp7VpSU,1835 +astropy/nddata/nddata_base.pyc,, +astropy/nddata/nddata_withmixins.py,sha256=lT93o_aUMtq9UgabGaIetQ6aEos4RSV8JRnIKmpWWw8,2334 +astropy/nddata/nddata_withmixins.pyc,, +astropy/nddata/nduncertainty.py,sha256=gbqH6nyELgVGrPhV_A4PgLCJhOX7B0ufzTRNNa3IjeU,25602 +astropy/nddata/nduncertainty.pyc,, +astropy/nddata/setup_package.py,sha256=7plGiBJHBR7zYL0-ycKIprIwvbdviPA1d2T0KDmbbOw,183 +astropy/nddata/setup_package.pyc,, +astropy/nddata/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/nddata/tests/__init__.pyc,, +astropy/nddata/tests/data/sip-wcs.fits,sha256=czYeIP9Jb4JkuhFKZ1udVBh2kybYmGxduH2SaGzB2so,23040 +astropy/nddata/tests/test_ccddata.py,sha256=M33jwmQ0TbHMwEnH9CxP_8PL_Y1KqVvZ_ot0cH2u8GM,33590 +astropy/nddata/tests/test_ccddata.pyc,, +astropy/nddata/tests/test_compat.py,sha256=OvziwMDd-5wZUpJu-bYWIL-6D55bKvJLsP7KdcJdCb8,5004 +astropy/nddata/tests/test_compat.pyc,, +astropy/nddata/tests/test_decorators.py,sha256=BvG1uXA_q0y2tcbjTlwuTC_TSmUUv2rFj4xcm2ZUbc0,9790 +astropy/nddata/tests/test_decorators.pyc,, +astropy/nddata/tests/test_flag_collection.py,sha256=yuFDZ7AV1gcgRI84ldBfqcp83E9ilTElN7iZOvdN6dQ,1607 +astropy/nddata/tests/test_flag_collection.pyc,, +astropy/nddata/tests/test_nddata.py,sha256=uGyru9asPO3QQ_Juon_CjCcF_nK4m7jjgG0BFI1-pXw,12582 +astropy/nddata/tests/test_nddata.pyc,, +astropy/nddata/tests/test_nddata_base.py,sha256=PcNEsDv5i1QtX8tTyeC82XUbROQUFEhyOTrN0-ud2BE,1053 +astropy/nddata/tests/test_nddata_base.pyc,, +astropy/nddata/tests/test_nduncertainty.py,sha256=7KIx1-p5MFL-Ah77cFdOk5HZsbA-o7mvVC95Fjo1v_g,9044 +astropy/nddata/tests/test_nduncertainty.pyc,, +astropy/nddata/tests/test_utils.py,sha256=LK8uy4WxKb2jQOWoM0vgiaKrPKbcXF0xBRCUp3q7v5Y,17815 +astropy/nddata/tests/test_utils.pyc,, +astropy/nddata/utils.py,sha256=eonJZpyHAam2FcYtXTLs21wGICFM_MF_8sPGgeVRCG4,34374 +astropy/nddata/utils.pyc,, +astropy/samp/__init__.py,sha256=-sqXx1cbi6RnGsj7dgSvBV58WSOf9puQpoBsDwIVIBY,1038 +astropy/samp/__init__.pyc,, +astropy/samp/client.py,sha256=Z6H1jMJCUOo78409KFZm8nKaHRN0NPDgM8-1CiGwndc,25622 +astropy/samp/client.pyc,, +astropy/samp/constants.py,sha256=Ma8eT384wPOkuMNXB_9aHWlEYHMq8JdW9dATfO5o9Sg,896 +astropy/samp/constants.pyc,, +astropy/samp/data/astropy_icon.png,sha256=anxCPITjfCCiKWJrjEZMYCBVWvKxq2tP-Rlh-W5Pk5s,1434 
+astropy/samp/data/clientaccesspolicy.xml,sha256=m1iU7PmY6w3H-oTxK1BkrvSAIuhTaJrUOY212bixXZI,284 +astropy/samp/data/crossdomain.xml,sha256=AzMTGKyFVzw636h5aNgCn9nG-BwUg6XwVIh0JyXC-GE,311 +astropy/samp/errors.py,sha256=bZbPiaoaicAKFxksJcDxqn3iS0sR33gjRflto2r2FYI,763 +astropy/samp/errors.pyc,, +astropy/samp/hub.py,sha256=sqs8uBJdPeklw2nrrb90Ckvq30og99DUseTrXa9X7oA,56033 +astropy/samp/hub.pyc,, +astropy/samp/hub_proxy.py,sha256=teFpP4hJlw7YfRdcA_Sv-YyXYWV80FtRwFJ4KQqTGIY,6306 +astropy/samp/hub_proxy.pyc,, +astropy/samp/hub_script.py,sha256=8cb55ImmnsHFU27-Ydt--pWU5bMXH1tj0esb3kqDy_s,6689 +astropy/samp/hub_script.pyc,, +astropy/samp/integrated_client.py,sha256=1eQ83L2zo0mSk_n3iCh4kDvwI-nev85mNrgk9sIhYRk,17315 +astropy/samp/integrated_client.pyc,, +astropy/samp/lockfile_helpers.py,sha256=q6p7vntxB6S3J8WQ7u5VsMtobE4-445pzkxeQt8zXu4,8347 +astropy/samp/lockfile_helpers.pyc,, +astropy/samp/setup_package.py,sha256=8gxqer_X855X67zSKpBsvwtpN9xbtyCc9OIzq-r0vhU,325 +astropy/samp/setup_package.pyc,, +astropy/samp/standard_profile.py,sha256=BUtg-Ifl3i4tiYx1Jyh6q4jB6k6V0oMVO-HtA4UjwN8,5906 +astropy/samp/standard_profile.pyc,, +astropy/samp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/samp/tests/__init__.pyc,, +astropy/samp/tests/test_client.py,sha256=8TCJwWs_hOBIMgkyA5eNyGc22M5gw2rkezzpctgC9qI,1213 +astropy/samp/tests/test_client.pyc,, +astropy/samp/tests/test_errors.py,sha256=eGMQF5hZKLGPdO8V9G_zGVShhFA7_KdEiTRFqcOldYg,597 +astropy/samp/tests/test_errors.pyc,, +astropy/samp/tests/test_helpers.py,sha256=sMqO7lTWejNC8KDZeek4dp9s6O47Pll6ZzlmmXkc4h8,2213 +astropy/samp/tests/test_helpers.pyc,, +astropy/samp/tests/test_hub.py,sha256=P8px-sUByxhCct873HFqCWcRjPq1KcfZi9BfO4o3R_g,888 +astropy/samp/tests/test_hub.pyc,, +astropy/samp/tests/test_hub_proxy.py,sha256=BC9r8BdhT6YqLZMmJA2_3RszoLu77t1aPyUOy05GK8c,1178 +astropy/samp/tests/test_hub_proxy.pyc,, +astropy/samp/tests/test_hub_script.py,sha256=qHJX8T6DaZE4Xe-ZcmlqatT8xsdv3U68QoMXrfb9mnQ,448 +astropy/samp/tests/test_hub_script.pyc,, +astropy/samp/tests/test_standard_profile.py,sha256=2Zax1Gl6LjmEK5M-deZO5Xr4KUQnh5HG-5uXk5fZhFg,8599 +astropy/samp/tests/test_standard_profile.pyc,, +astropy/samp/tests/test_web_profile.py,sha256=llMTC3EzhgFIqfv-B8fmpot4oWt_RNAyLgRC4biIJuc,3319 +astropy/samp/tests/test_web_profile.pyc,, +astropy/samp/tests/web_profile_test_helpers.py,sha256=-LJtHlprpjfYY3blEaMXVkEH8wqe2fOPNmPvEqOMR8w,8820 +astropy/samp/tests/web_profile_test_helpers.pyc,, +astropy/samp/utils.py,sha256=wyDGv_0xKZZxFR1wKcEgvCBT17rsaRigjoiHGrHI3Sg,4611 +astropy/samp/utils.pyc,, +astropy/samp/web_profile.py,sha256=bGn4h8h751lrWGmefLuajimziDooY-S3A5Hde1sTeiA,5804 +astropy/samp/web_profile.pyc,, +astropy/setup_package.py,sha256=BMJQ3GyeGUp6So6QuNYTa-iqfpt3SSggl7ArAd77Wow,142 +astropy/setup_package.pyc,, +astropy/stats/__init__.py,sha256=VykOUhDFaBr9cXjESRrXkqwbFOJXwaG5TMGYXc28R9o,768 +astropy/stats/__init__.pyc,, +astropy/stats/bayesian_blocks.py,sha256=xUDGpwqwFPpj2zwjk-YA-1LjY_GgX0GvpvqZf-bqN1o,18814 +astropy/stats/bayesian_blocks.pyc,, +astropy/stats/biweight.py,sha256=h5EETjIBb7UGe5bD63bHB1rN7DqVBXSO6yO_Y8xgPqI,24261 +astropy/stats/biweight.pyc,, +astropy/stats/circstats.py,sha256=qyi4hq850kNkyYyE92x0Q_j_37qnTuson7xON92bKi8,17037 +astropy/stats/circstats.pyc,, +astropy/stats/funcs.py,sha256=720YSvbdJWdgYCPNTMixadT06C-8D6GiUQrWUgfBq5k,47569 +astropy/stats/funcs.pyc,, +astropy/stats/histogram.py,sha256=wJYLpv-FkRWkArcuONIr9t7Fcpb6MEww8qVQH-7gXbU,10675 +astropy/stats/histogram.pyc,, 
+astropy/stats/info_theory.py,sha256=y2NjwRRdGwpg5e6bljrMy2dYlVe-zqK-csdrV9iin40,15129 +astropy/stats/info_theory.pyc,, +astropy/stats/jackknife.py,sha256=FTw5kXiiHQNf8vbP8WWMjo2uUNLXBebQ7D3g8hTM4lo,6109 +astropy/stats/jackknife.pyc,, +astropy/stats/lombscargle/__init__.py,sha256=bOfKB5Q_Vgv2T03tM1aR5Hl_0V47YSPTx13nr4wtOP0,194 +astropy/stats/lombscargle/__init__.pyc,, +astropy/stats/lombscargle/core.py,sha256=VJu8KH0RsQn54KZ6mwtvkEVMu7d1UoPelYZ_U6d5FX0,15194 +astropy/stats/lombscargle/core.pyc,, +astropy/stats/lombscargle/implementations/__init__.py,sha256=YoSLtFWsMova_XKbc1cNxWG-qPeUs_nynBoITWO4foc,322 +astropy/stats/lombscargle/implementations/__init__.pyc,, +astropy/stats/lombscargle/implementations/chi2_impl.py,sha256=qewlxAVKpBjTDcveYLId4g9NTVPfQBqHFBdqRd8z1VA,3015 +astropy/stats/lombscargle/implementations/chi2_impl.pyc,, +astropy/stats/lombscargle/implementations/cython_impl.so,sha256=4G0HIvMleT0ZAXygDK9sLblyJYLH9CMnrZzO-1t7PL4,808432 +astropy/stats/lombscargle/implementations/fast_impl.py,sha256=Rv8i114jHHRInmNemazf3yB4t857Ma2s9S7dZnajHMo,4973 +astropy/stats/lombscargle/implementations/fast_impl.pyc,, +astropy/stats/lombscargle/implementations/fastchi2_impl.py,sha256=tFlL86rpqbcOseUjKsZ3zPkXv5t-0NlX28UrvnXZKaM,5085 +astropy/stats/lombscargle/implementations/fastchi2_impl.pyc,, +astropy/stats/lombscargle/implementations/main.py,sha256=3dpO1yryDzEDsS1kr0kXGqKDTx6EGQKhXbzftp8U2j4,8214 +astropy/stats/lombscargle/implementations/main.pyc,, +astropy/stats/lombscargle/implementations/mle.py,sha256=s412qC6pc-WAjA7SyZryVu0fg6Iw0GHGUTd3mtM2D1E,3320 +astropy/stats/lombscargle/implementations/mle.pyc,, +astropy/stats/lombscargle/implementations/scipy_impl.py,sha256=qxv_W8QvQoV9Ri8ewtv6LXQSxGarHwNH1iOJg3-x8R0,2523 +astropy/stats/lombscargle/implementations/scipy_impl.pyc,, +astropy/stats/lombscargle/implementations/slow_impl.py,sha256=P4HH0S03nx3FWaoxM99wMufxeHt13U5Jf1Hx5e6hJ_E,3913 +astropy/stats/lombscargle/implementations/slow_impl.pyc,, +astropy/stats/lombscargle/implementations/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/stats/lombscargle/implementations/tests/__init__.pyc,, +astropy/stats/lombscargle/implementations/tests/test_mle.py,sha256=FgCqUGzBqyCUAue3QxQUttsQ3okt5xqKyvriJXCOBsI,1922 +astropy/stats/lombscargle/implementations/tests/test_mle.pyc,, +astropy/stats/lombscargle/implementations/tests/test_utils.py,sha256=4jy-jLZ3m2dS-v8EXe50n5Qrioph-kDjUOTHfQiP3zU,2388 +astropy/stats/lombscargle/implementations/tests/test_utils.pyc,, +astropy/stats/lombscargle/implementations/utils.py,sha256=2eC6QA4_0yNPdFaJo509fws5l3K7e3A_4qEos2d7A1M,5913 +astropy/stats/lombscargle/implementations/utils.pyc,, +astropy/stats/lombscargle/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/stats/lombscargle/tests/__init__.pyc,, +astropy/stats/lombscargle/tests/test_lombscargle.py,sha256=6FQp18FffXceMFc8Fvjb4-8b7emkUKNvf_oJ4ru3MQ0,16935 +astropy/stats/lombscargle/tests/test_lombscargle.pyc,, +astropy/stats/setup_package.py,sha256=CmVIib0eRCfKQmyaFuNG0SORkuN7YvwGir-_srMohRQ,38 +astropy/stats/setup_package.pyc,, +astropy/stats/sigma_clipping.py,sha256=ocvBd1H0p85ADrPLhV7oEaKuq_t4nYL52XvFB9_cYp0,18283 +astropy/stats/sigma_clipping.pyc,, +astropy/stats/spatial.py,sha256=879LiiEXpTxXuRp54pbkYbNBt8zFidDBwUn68Tuh4ok,12996 +astropy/stats/spatial.pyc,, +astropy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/stats/tests/__init__.pyc,, 
+astropy/stats/tests/test_bayesian_blocks.py,sha256=V9NB-fsCNmdQvyTi20mYWw3pwLIeA6ts7m53_PbYIYU,4314 +astropy/stats/tests/test_bayesian_blocks.pyc,, +astropy/stats/tests/test_biweight.py,sha256=X_lwiSsdJhISd8s_o6x5lq4L0eNWr3eGvVt2Olrshog,9989 +astropy/stats/tests/test_biweight.pyc,, +astropy/stats/tests/test_circstats.py,sha256=YCIFZHZARX2S-12yrLcmlYvmc4N3fLLTxYz1S_m6V-U,4334 +astropy/stats/tests/test_circstats.pyc,, +astropy/stats/tests/test_funcs.py,sha256=FbFOqttbfGUsOQgeEdeBcUmvY_48kokB9QR43byOF5k,23840 +astropy/stats/tests/test_funcs.pyc,, +astropy/stats/tests/test_histogram.py,sha256=3p22ipGDp0JhCJtc5FMDDgM-3_ug3TO2SySvHr27Y-A,4943 +astropy/stats/tests/test_histogram.pyc,, +astropy/stats/tests/test_info_theory.py,sha256=T4V9mH3zAHOdCthK_JFeqSv0L9BzKQZqNqWOOOEShfk,2840 +astropy/stats/tests/test_info_theory.pyc,, +astropy/stats/tests/test_jackknife.py,sha256=78LW6ffP9R2UsoagWGvjGz1T2wHCgrlm2lJJxx-oNbs,2027 +astropy/stats/tests/test_jackknife.pyc,, +astropy/stats/tests/test_sigma_clipping.py,sha256=QhbAWayMSJOMy9N5N1mME5UPRV_9I3eZzqzkA-ER5MM,6330 +astropy/stats/tests/test_sigma_clipping.pyc,, +astropy/stats/tests/test_spatial.py,sha256=2vSr7EFhZRlk7fH1Ubmkd5gylMRkKRTgGXwYeXI9OrE,5827 +astropy/stats/tests/test_spatial.pyc,, +astropy/table/__init__.py,sha256=mJNKnMKpaBtPneyP3EMFHS6p99gZOQU3VdY3qeabZSw,2506 +astropy/table/__init__.pyc,, +astropy/table/_column_mixins.so,sha256=TSPZzMGN9HkFVwca2Eja13aEeqwfee5LDR6c8mssHVo,307104 +astropy/table/_np_utils.so,sha256=Mwan3x3wc9sWSddlUMyLxDt1dvcHNr0erfLeFMozZEA,282576 +astropy/table/bst.py,sha256=qDRjHPRSQp-cgbbEOO8bFfEGbU2m7DeMKHrdTP4UrOk,19544 +astropy/table/bst.pyc,, +astropy/table/column.py,sha256=o2iKRc8IzhR88mZjAhLhho359QdghCC2OJmWDoIyIBk,48635 +astropy/table/column.pyc,, +astropy/table/groups.py,sha256=L24cyfpvABmI1GAYgsqPq7waf27byQ7wo-Lht3OkGVY,13867 +astropy/table/groups.pyc,, +astropy/table/index.py,sha256=98nRkEguWBvLl9OaV7BAwQmIFYr68uaRIN6lcf088Yk,28129 +astropy/table/index.pyc,, +astropy/table/info.py,sha256=R2FeOlqN2UubC2BB2tbQPlqZmad1he9XTFgSy8aAdhw,3769 +astropy/table/info.pyc,, +astropy/table/jsviewer.py,sha256=YnWIgXXxrIC_wjH7aUT_8aqbN3Y3FApk-iRVG_X8e5c,6430 +astropy/table/jsviewer.pyc,, +astropy/table/meta.py,sha256=b278Sm0bxTOxHoCFJeyMl3rCOJJbvqVbNkk86Ddd_IY,11035 +astropy/table/meta.pyc,, +astropy/table/np_utils.py,sha256=JyCl1TISWkqFQpflw3ThvruRY2gAcZvyFX4Kv9bfaiI,7683 +astropy/table/np_utils.pyc,, +astropy/table/operations.py,sha256=EZu9cGSLUC9_yRUIbjF9PDfbIgZeWAh8YUJEshrYZ4Q,32868 +astropy/table/operations.pyc,, +astropy/table/pprint.py,sha256=iyY6paBa9zNCYZ0TI0P84X19qZmPhtKhFS9Y7pE4tnE,27373 +astropy/table/pprint.pyc,, +astropy/table/row.py,sha256=qrdErFYQoqvjCSBTOPilfZ-sTc8RV64Vg7aeNvx_17Y,5647 +astropy/table/row.pyc,, +astropy/table/serialize.py,sha256=yLkSzr2dcILGFBezUUNc0r1ybBxUxVO2UDMZ2Sk1AUc,8245 +astropy/table/serialize.pyc,, +astropy/table/setup_package.py,sha256=rY2ZOj6aLz8wyNxcbIzRyAYDFxC2RsxZXCwImFtkmXM,593 +astropy/table/setup_package.pyc,, +astropy/table/sorted_array.py,sha256=jIRO4g2lJixjNw_u9lOTb3-kr5w8QbEqmbSZK98X3Cg,9489 +astropy/table/sorted_array.pyc,, +astropy/table/table.py,sha256=w9HXcHX8hPIGglG0Go1Nj5q6UlqtoB5hgEn_A4gDUsY,105638 +astropy/table/table.pyc,, +astropy/table/table_helpers.py,sha256=zIvdFAX_4BUdJ-eIzVkR-u3DeD5rlp_MDc-SVYqAvoo,5492 +astropy/table/table_helpers.pyc,, +astropy/table/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/table/tests/__init__.pyc,, +astropy/table/tests/conftest.py,sha256=TqyPYhgqLfiWtLBTgEWk6O7h6zHygCrdtKF0ojSfDt8,6139 
+astropy/table/tests/conftest.pyc,, +astropy/table/tests/test_array.py,sha256=GNVkI3mZ6N5zsWkm4671YL9MsYcBKo5f8abDKWPKPjY,1361 +astropy/table/tests/test_array.pyc,, +astropy/table/tests/test_bst.py,sha256=IeZ3k7KVM7d8iPnuph0JQhHzq89Me5FtjM7pTPnkz-Y,2661 +astropy/table/tests/test_bst.pyc,, +astropy/table/tests/test_column.py,sha256=LUrMQhCKqDxsArm1bl5eRWSbFZRCIAQjB-c6wp72Xng,30068 +astropy/table/tests/test_column.pyc,, +astropy/table/tests/test_groups.py,sha256=yA1vZ3urhiS3TbDjBGqkTdclk23mFkp0Qr9EQB3F8I0,20114 +astropy/table/tests/test_groups.pyc,, +astropy/table/tests/test_index.py,sha256=mPheLHJQkwNMWPr2jJByt3dm6m1XXbZzjxUa4BNLLNs,16636 +astropy/table/tests/test_index.pyc,, +astropy/table/tests/test_info.py,sha256=0JPxdPQAYedjYJfjikNSLyMlIqz-Xno4bZgnORpTUGo,9018 +astropy/table/tests/test_info.pyc,, +astropy/table/tests/test_init_table.py,sha256=kX_o118D8LxO8pm7CrbVyL_Qk_f6EbOkFrxAAC3MkYQ,18369 +astropy/table/tests/test_init_table.pyc,, +astropy/table/tests/test_item_access.py,sha256=nFnkA_JjEoUS7JSJjijor9lRPcY5wIA-s9oR11YTbbU,9369 +astropy/table/tests/test_item_access.pyc,, +astropy/table/tests/test_jsviewer.py,sha256=vJ-v7DmWEcURZBbDWykG8PoFhy3JaHWjvLT0POK6uHE,5473 +astropy/table/tests/test_jsviewer.pyc,, +astropy/table/tests/test_masked.py,sha256=aXdIhDAuzeEXCeI7QVRC5Wm9lfSZ9OzsNI9NnC7pA74,16855 +astropy/table/tests/test_masked.pyc,, +astropy/table/tests/test_mixin.py,sha256=3KVDJrMGHIZAzXd3Lqgxg9edvRxKSLQdY6roQz36f1M,20315 +astropy/table/tests/test_mixin.pyc,, +astropy/table/tests/test_np_utils.py,sha256=TA9UyEJpYLvV8RbYyQK-wwotU7s3PkfW1FLjCD7jZDg,2054 +astropy/table/tests/test_np_utils.pyc,, +astropy/table/tests/test_operations.py,sha256=mBOlaVqqz992B8tclJDTDJqlT7sBLVr43TuPWwELYv0,47507 +astropy/table/tests/test_operations.pyc,, +astropy/table/tests/test_pickle.py,sha256=oGz3x1X6iT8kqdLx2QVdK1LBI5bQAiG57EL0HSqcJLQ,3842 +astropy/table/tests/test_pickle.pyc,, +astropy/table/tests/test_pprint.py,sha256=oSpUgsW8CYEIw8ixnAB5F62vwcLBteoMw4CJ4_UVrCk,27741 +astropy/table/tests/test_pprint.pyc,, +astropy/table/tests/test_row.py,sha256=7yeNtwPavLJYkTHuyc6eorD8KBqMWg4pXT2en0dyzLA,8506 +astropy/table/tests/test_row.pyc,, +astropy/table/tests/test_subclass.py,sha256=ywsPX8K3oB85QJhLD2zHeJT9S9rEUen_8UISqSqDyn0,2488 +astropy/table/tests/test_subclass.pyc,, +astropy/table/tests/test_table.py,sha256=I5w9HRwV90-HRQFtR3c8JWcpaUZq4pbZ1aQDoVQx5Ic,69302 +astropy/table/tests/test_table.pyc,, +astropy/tests/__init__.py,sha256=GGtClKFSEHS9T5CUF8EwAbyHMuNyFq05FEmCH6z04cM,1286 +astropy/tests/__init__.pyc,, +astropy/tests/command.py,sha256=WvDEan8-46bwOvPGbE8l1gO-j2m2oos7kgiPwdhl_90,13038 +astropy/tests/command.pyc,, +astropy/tests/coveragerc,sha256=eI4pFu9hDxybjjbWTbN5hiw3wa4cCgohKx3r4oWNN0c,734 +astropy/tests/disable_internet.py,sha256=6m1flJ-Q3al-QW8Xs1UD6JMcSjump6UWbsFLuv_on20,5274 +astropy/tests/disable_internet.pyc,, +astropy/tests/helper.py,sha256=1HUW9KUE2cFWmDR2dG9o8Dl1zu1-G9sxsUk0tKle6S4,20194 +astropy/tests/helper.pyc,, +astropy/tests/image_tests.py,sha256=EMRLxbaXXI6MeZ1aITOIOwTyRvmTrOWoT0QtT--_fKs,745 +astropy/tests/image_tests.pyc,, +astropy/tests/output_checker.py,sha256=SGUbpcKB0a8FUDrbkQRmDpCe6Oi5-ybttXyaUcealnY,7300 +astropy/tests/output_checker.pyc,, +astropy/tests/pytest_plugins.py,sha256=SsRvg_WPKc4GrIVufthRU1sd3KDh_ES2jrF4NlwXhOE,14089 +astropy/tests/pytest_plugins.pyc,, +astropy/tests/pytest_repeat.py,sha256=4eDrXroCu5dRUg-84tbCT0CZqQxPsRUn_KBG5s18jWg,917 +astropy/tests/pytest_repeat.pyc,, +astropy/tests/runner.py,sha256=xu3_OCnmdv-76Amc7njNPELMLPJIbwsa1NSmqpJNwuU,19170 
+astropy/tests/runner.pyc,, +astropy/tests/setup_package.py,sha256=J_RrGmTipBpJTU_yv09dE_dkSEZAoXyivjDYdm8aiH4,249 +astropy/tests/setup_package.pyc,, +astropy/tests/test_logger.py,sha256=-ZsWJ88qiaoN4PMSJ2tM9jc7RpTGDg7VMbDsJT4eGbI,15539 +astropy/tests/test_logger.pyc,, +astropy/tests/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/tests/tests/__init__.pyc,, +astropy/tests/tests/data/open_file_detection.txt,sha256=Dn9bBEhDSn-oNxT905gj3yvmPZr2edRRquob5r6dVmY,9 +astropy/tests/tests/test_imports.py,sha256=dTfwNooh1jd_9kCLLYBsGJLW6f5hGa5ZxW_0WZCVGcA,2021 +astropy/tests/tests/test_imports.pyc,, +astropy/tests/tests/test_open_file_detection.py,sha256=cm70Xp2a_B7xxjKf3JgW512zAPqYp46qRJfeFjN1Ji8,347 +astropy/tests/tests/test_open_file_detection.pyc,, +astropy/tests/tests/test_quantity_helpers.py,sha256=ifrpOmcZCc9x535b0FKqDeEiR1d0Mm5RBHD_Bb_XdLE,1529 +astropy/tests/tests/test_quantity_helpers.pyc,, +astropy/tests/tests/test_run_tests.py,sha256=HBdQpK4q0nm9NZsD1qGSLnX6CnLJwmpq9zdPQ0oEMa4,1997 +astropy/tests/tests/test_run_tests.pyc,, +astropy/tests/tests/test_runner.py,sha256=gHlq8L_5cy1xUEELj-w7C1zGxWtA-hZEDaVWzaBp9sQ,2026 +astropy/tests/tests/test_runner.pyc,, +astropy/tests/tests/test_skip_remote_data.py,sha256=NAZCF2xCG_8oCD8seyBctn6ldwWUU6rV3SwJ_jyTbss,1765 +astropy/tests/tests/test_skip_remote_data.pyc,, +astropy/tests/tests/test_socketblocker.py,sha256=uy79Sz4LAUr2R3lhdE0KF3WutnU0UXYV6VM8n6s8Dkc,2482 +astropy/tests/tests/test_socketblocker.pyc,, +astropy/time/__init__.py,sha256=o8Xjf3GZOA0CQ_NI_Aa1VTIUt6caQpY2QSyPd_nwPsA,107 +astropy/time/__init__.pyc,, +astropy/time/core.py,sha256=G_KAH1gk82dEkgWVBVugvUXUgEVfegNgPYUo3WI6oKc,70269 +astropy/time/core.pyc,, +astropy/time/formats.py,sha256=GaeKA3NOr56rooK0-LICVv89AyqCQP2_vQ4rC5zyrj4,44049 +astropy/time/formats.pyc,, +astropy/time/setup_package.py,sha256=82VuCVSU5qR1xNbgTtuQ-m9p-q3dpRAP499jMIfi3Hs,104 +astropy/time/setup_package.pyc,, +astropy/time/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/time/tests/__init__.pyc,, +astropy/time/tests/test_basic.py,sha256=ueT5YikKLmoETbHkP1QDbbKd59RSBgjP_1jaYL-jr_k,45811 +astropy/time/tests/test_basic.pyc,, +astropy/time/tests/test_comparisons.py,sha256=cXj7cqvAvynpl_NZkuWehZzGCie2y29v6PWtDYd--QM,3073 +astropy/time/tests/test_comparisons.pyc,, +astropy/time/tests/test_corrs.py,sha256=UeYvVeKIdaJyt9Zs5vERCkEf2s1Lrj_rjKfu8zoxJAU,2762 +astropy/time/tests/test_corrs.pyc,, +astropy/time/tests/test_delta.py,sha256=DkUGwI9WsnF6G6vpXF4eSH5oXFOsJdtOtra5RBDd3bU,14976 +astropy/time/tests/test_delta.pyc,, +astropy/time/tests/test_guess.py,sha256=WFQSkNLNRD2YLwewS34hw3YXUxAjgr4ksw96IDEZzbk,1031 +astropy/time/tests/test_guess.pyc,, +astropy/time/tests/test_methods.py,sha256=YA4HxuUhk-JVZSHJfF2A6yxIQv2e65N0LgeeAg6wKz0,19086 +astropy/time/tests/test_methods.pyc,, +astropy/time/tests/test_pickle.py,sha256=JisMq-uCmSHHpM-jgCJkD08uMF8WOBbqhI_YJlo6tHY,741 +astropy/time/tests/test_pickle.pyc,, +astropy/time/tests/test_precision.py,sha256=g8Hw6OTn8ZH7hhrugqL7jUU6yNwcQaWPrvBLZ8Rmolc,4223 +astropy/time/tests/test_precision.pyc,, +astropy/time/tests/test_quantity_interaction.py,sha256=-VH_6p23d0uVrEsMGMbWbg1icO7srKUpx4RXhbkwSs4,9573 +astropy/time/tests/test_quantity_interaction.pyc,, +astropy/time/tests/test_sidereal.py,sha256=iaULPHzcbImY62J1V2U_LpbrGzhkAhcPZ8Etv79_hdI,7713 +astropy/time/tests/test_sidereal.pyc,, +astropy/time/tests/test_ut1.py,sha256=-hKC38DYFG4uFttlVBS2KOB6N__dq-1ZQwfNa6TKoN8,3164 +astropy/time/tests/test_ut1.pyc,, 
+astropy/time/utils.py,sha256=oaZXSYbvPWhM866NAcU53iMw7NflDcOHliDSgxDEniQ,5478 +astropy/time/utils.pyc,, +astropy/units/__init__.py,sha256=aLRzo6MfJdsIyvsRRKd7__uioG17MdRDO7YhDRzasfU,1057 +astropy/units/__init__.pyc,, +astropy/units/astrophys.py,sha256=4UxsXMO_bSFyafzZvJiHf8vCi1NWSHY0iLrkQ4i8KRA,6890 +astropy/units/astrophys.pyc,, +astropy/units/cds.py,sha256=uWwkFnzLZBLUH0Ej_BYGhgY96Eo-mf8ncz8JDe-5Kc4,7192 +astropy/units/cds.pyc,, +astropy/units/cgs.py,sha256=WbfR9z2KQQjnYWf2MbS3Q3nnA3_mdADyBFAa5wSzQ_8,3775 +astropy/units/cgs.pyc,, +astropy/units/core.py,sha256=IllpiFtU_WgrVwYIM3JIqiVdqX4vXRDI1A8AVy8okjc,78900 +astropy/units/core.pyc,, +astropy/units/decorators.py,sha256=eYCkHCG53AoEvhdTAD8Vls4sfMcqn-eZAIie46lFi80,8721 +astropy/units/decorators.pyc,, +astropy/units/deprecated.py,sha256=02JvVrpTh5zS7yFsf0yVVa-iJ966NSymEacgq9EhavM,2340 +astropy/units/deprecated.pyc,, +astropy/units/equivalencies.py,sha256=HnZ-gk7KK3af9KjzypNLvT2_CnberIq9HJc58a1reYI,20559 +astropy/units/equivalencies.pyc,, +astropy/units/format/__init__.py,sha256=DT1zKDPnsBW675zT8CMDw8FcIq3MF6m-qksewa5RMC0,2025 +astropy/units/format/__init__.pyc,, +astropy/units/format/base.py,sha256=0ug16pl2aza3xrkwX3vlAuZjk8HrU7H8Xt9R5qqstNU,1406 +astropy/units/format/base.pyc,, +astropy/units/format/cds.py,sha256=Kj_7cIae99_lHBe0AQ7fYmYzqvjFCKoKGR_F9Od_2N0,10253 +astropy/units/format/cds.pyc,, +astropy/units/format/cds_lextab.py,sha256=xFFJlw0Gg3Y6EN11WwjSz7PGOLstF0AQv6xeJv9ixxo,971 +astropy/units/format/cds_lextab.pyc,, +astropy/units/format/cds_parsetab.py,sha256=clKbuGx140YQzT-EahgBQDoeb8Ts_BKS6t1z-RQHtNk,4034 +astropy/units/format/cds_parsetab.pyc,, +astropy/units/format/console.py,sha256=BbgyZjwnC0Yy8O6QxP0Yn7DYhn24T4-tvFgtFGwVHwE,2875 +astropy/units/format/console.pyc,, +astropy/units/format/fits.py,sha256=aWM8F6-Unyi96BWGNk0bdjlNTw6nv-7YBtXY5MgCn3Q,5418 +astropy/units/format/fits.pyc,, +astropy/units/format/generic.py,sha256=8XzWwaJgcq0ofs8e2jjuaMo1PiTuNWJvXQIcIQRfogY,15267 +astropy/units/format/generic.pyc,, +astropy/units/format/generic_lextab.py,sha256=rXRdspEX-7bQjVrPSWnnELjtQKRaQdhKg7AJ4jIbUD8,1254 +astropy/units/format/generic_lextab.pyc,, +astropy/units/format/generic_parsetab.py,sha256=Lzq_JUtRMhtnEKX3FwKfT3Ux6LVMu3jVEGjCItd1ZQc,9332 +astropy/units/format/generic_parsetab.pyc,, +astropy/units/format/latex.py,sha256=X9mmOFKqFhZO6smfJUC_btgaXOWgI7A3MvF4twJYdSg,4096 +astropy/units/format/latex.pyc,, +astropy/units/format/ogip.py,sha256=1mPz8kB7tGuyBj6Usywuo9iWjX9Lfoj2cUd_jEzAhzQ,14956 +astropy/units/format/ogip.pyc,, +astropy/units/format/ogip_lextab.py,sha256=UUAJW5aE9bjPlfKPJTGutdTNkcLIgHJq45G_E79HAcc,1267 +astropy/units/format/ogip_lextab.pyc,, +astropy/units/format/ogip_parsetab.py,sha256=42Bo47vr61YCuQWlite4fJeiTDEqXmOY4PAJn9ZxvTY,6922 +astropy/units/format/ogip_parsetab.pyc,, +astropy/units/format/unicode_format.py,sha256=uTDHMXqz9qwwkbxnW1wLzu29wv6OsdVC94Ic3xtRRRo,1729 +astropy/units/format/unicode_format.pyc,, +astropy/units/format/utils.py,sha256=Tuk3KxYHDMoOm-aAsGW26TyADnVHnzxfjWT_PCAU2hQ,6168 +astropy/units/format/utils.pyc,, +astropy/units/format/vounit.py,sha256=f9w_E-SIKml1ttY_S0_eNP8KPch_VOm9i3HR3Cj40Ss,8246 +astropy/units/format/vounit.pyc,, +astropy/units/function/__init__.py,sha256=geXtW7km4xkzGokaSGNx0bbPBFJSYLAyfls2VUtv7FY,360 +astropy/units/function/__init__.pyc,, +astropy/units/function/core.py,sha256=IdAo08CwSihzxJUmjZtwUzgfphSvKRLut3h8lVySU10,26563 +astropy/units/function/core.pyc,, +astropy/units/function/logarithmic.py,sha256=ds86UL2O2YhhjPI-IWEwOKkL9u6Dx5ZUuzgacwNfMNs,12518 
+astropy/units/function/logarithmic.pyc,, +astropy/units/function/magnitude_zero_points.py,sha256=TXkUlscqyRWW0egL09NwzkD76TQ92VNWX4vYQWZ8Wxk,2390 +astropy/units/function/magnitude_zero_points.pyc,, +astropy/units/function/mixin.py,sha256=TBl3eNbNykZfm2im8nqNfn4wG09j2ilqxnOVPfRzAAg,745 +astropy/units/function/mixin.pyc,, +astropy/units/function/units.py,sha256=SIV2jr6R5HhFxPQyxnzocm2BVAPbBs26jAdUP3bbAG8,1862 +astropy/units/function/units.pyc,, +astropy/units/imperial.py,sha256=ufU4-t0tJ84LUKxNWBgdFd6YkpuQuN75_BH5q4d27Lg,5444 +astropy/units/imperial.pyc,, +astropy/units/physical.py,sha256=w_OCugeDYmji-h1OBhFAaqMM4PdbNwvB7Z28Ap1_aVY,4143 +astropy/units/physical.pyc,, +astropy/units/quantity.py,sha256=YKU3Wsoj6p9oPBcOE_PdQnp8ZTpRieP_pepv-aZw2LM,69207 +astropy/units/quantity.pyc,, +astropy/units/quantity_helper.py,sha256=iQ1cQsCpbrn-Zw6jxbHgthiu-nXxslW87yRDmHnDEQk,25608 +astropy/units/quantity_helper.pyc,, +astropy/units/required_by_vounit.py,sha256=sgkpUykMfKqKV9NaPgNccvr6WHTOQksszlWJEIfyYEc,2280 +astropy/units/required_by_vounit.pyc,, +astropy/units/setup_package.py,sha256=CmVIib0eRCfKQmyaFuNG0SORkuN7YvwGir-_srMohRQ,38 +astropy/units/setup_package.pyc,, +astropy/units/si.py,sha256=yrssAwH4mzmN1kf3oCCxrDNS5W2egI-qSXRJqCai0yc,8764 +astropy/units/si.pyc,, +astropy/units/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/units/tests/__init__.pyc,, +astropy/units/tests/py3_test_quantity_annotations.py,sha256=-kKQXrXfPq9BflO7hDwlNM74gilCQ4exQCoClMed7ls,8936 +astropy/units/tests/py3_test_quantity_annotations.pyc,, +astropy/units/tests/test_deprecated.py,sha256=kvfLLVYpjncE31INaZ40i3MmjGf-N1qLIG_m8JeOJcU,2280 +astropy/units/tests/test_deprecated.pyc,, +astropy/units/tests/test_equivalencies.py,sha256=JC__bcZnVVhpm4JIg2GKoXICcBupwZymVeLafUb8kCg,25642 +astropy/units/tests/test_equivalencies.pyc,, +astropy/units/tests/test_format.py,sha256=cGKRiwNFTc4WUe0RQworbvee4sU3T2HgSgVNRggZCB8,16334 +astropy/units/tests/test_format.pyc,, +astropy/units/tests/test_logarithmic.py,sha256=S2ORrvHdckP2h5DObMXhpOW3yXKeBR63M5y-KCAMVKY,32026 +astropy/units/tests/test_logarithmic.pyc,, +astropy/units/tests/test_physical.py,sha256=LNVJtJGAfEKHV3AKbC2I4YbXPZUuzzYgse8NktvmjDw,1469 +astropy/units/tests/test_physical.pyc,, +astropy/units/tests/test_quantity.py,sha256=D9PnI-Upc3qVF4qUd1iZBWQUU-zvjKuPn7WZZFi1VeY,47617 +astropy/units/tests/test_quantity.pyc,, +astropy/units/tests/test_quantity_array_methods.py,sha256=S0OBrGK1VoWtFxZmYBKzqtn8QlMblamnZS9rAtZ0o0w,18468 +astropy/units/tests/test_quantity_array_methods.pyc,, +astropy/units/tests/test_quantity_decorator.py,sha256=Ctd_fjYFT5yneDVt1S26cDF0wNcX7_SoHw27Uc4OV8Q,8748 +astropy/units/tests/test_quantity_decorator.pyc,, +astropy/units/tests/test_quantity_non_ufuncs.py,sha256=4y-fiKZq38W_eKH0SNvsLdz4wygMmK38EqR_EEggxo4,950 +astropy/units/tests/test_quantity_non_ufuncs.pyc,, +astropy/units/tests/test_quantity_ufuncs.py,sha256=jsLyTJfIjw12bHyYoPHPSRIzXrfEJtTdSFGZFROH5QY,38591 +astropy/units/tests/test_quantity_ufuncs.pyc,, +astropy/units/tests/test_units.py,sha256=5yqDdxL1LRiqoVY8PC2B_fwnbLjBUTDKXd6CMhtNyx8,20899 +astropy/units/tests/test_units.pyc,, +astropy/units/tests/test_utils.py,sha256=h7f8dKmkc-9mHddX07Udn0ZmEan_F1B1MfjC2wOmfAA,825 +astropy/units/tests/test_utils.pyc,, +astropy/units/utils.py,sha256=XCLN3Lw09bFIHpdwncR38Fv5UuGK9pMl6-GRIbnMbJs,7848 +astropy/units/utils.pyc,, +astropy/utils/__init__.py,sha256=NsOBP1T2Y9i7v7jS-7wcOzb1BR0jBwb6KxVgHAOnAUg,730 +astropy/utils/__init__.pyc,, 
+astropy/utils/_compiler.so,sha256=n8-mSDZHNlEm8UGxW6LY_VlrQS2cc1wwF7dfzAAZcxk,23600 +astropy/utils/argparse.py,sha256=Qmq4I1EyK-pQdnlCZV8DFCsrpsuQq8dkZfOPe1MizDk,1599 +astropy/utils/argparse.pyc,, +astropy/utils/codegen.py,sha256=feu7ctkdGLt3G9Jz4GBSzV4irw6zq7S1JQVgu3UU1Vk,4712 +astropy/utils/codegen.pyc,, +astropy/utils/collections.py,sha256=92oxnxWmkRA5b5oAUWw8teHx7i0RwDJJEQZwD43yhNc,1388 +astropy/utils/collections.pyc,, +astropy/utils/compat/__init__.py,sha256=BDwVUrqCqzYZzjE9vB-aok4In6isLsSbgWMf4IwUhAI,481 +astropy/utils/compat/__init__.pyc,, +astropy/utils/compat/_funcsigs.py,sha256=yF5iqpXc3k6gutqMe64HJvqOv1iNDJ25yE2gHwCeZL4,29814 +astropy/utils/compat/_funcsigs.pyc,, +astropy/utils/compat/funcsigs.py,sha256=PdIsKQP-ijPy_AiIQt8FVFLEd5oJYtySUEY_-YrAe7w,241 +astropy/utils/compat/funcsigs.pyc,, +astropy/utils/compat/futures/__init__.py,sha256=LegOSFn3GsksTyBZ7_K-x_A3qTGpITCVFnOUtouq1HA,708 +astropy/utils/compat/futures/__init__.pyc,, +astropy/utils/compat/futures/_base.py,sha256=H46iUo1ESAO2BscEmw5MmycGGyttcOOYxgr3dJ1GINE,20043 +astropy/utils/compat/futures/_base.pyc,, +astropy/utils/compat/futures/process.py,sha256=wxENTs8j1BqU9sTTnymghde5BPPZ9EB7dBPdp4XmqpA,14632 +astropy/utils/compat/futures/process.pyc,, +astropy/utils/compat/futures/thread.py,sha256=hjtM4SYiFRmfUPPFzqecmFO7bYz81mUsGw03hK12KFk,4807 +astropy/utils/compat/futures/thread.pyc,, +astropy/utils/compat/misc.py,sha256=OWqHMamBN2b5O8OTiJfBrL1-W9gXdRP9sXTfM4nRuug,4732 +astropy/utils/compat/misc.pyc,, +astropy/utils/compat/numpy/__init__.py,sha256=_JLh55pTW2fGFsIRqTanH_N7zbcZFdYHeY394zQ-eco,392 +astropy/utils/compat/numpy/__init__.pyc,, +astropy/utils/compat/numpy/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/compat/numpy/core/__init__.pyc,, +astropy/utils/compat/numpy/core/multiarray.py,sha256=FrdWsYQVo8GN1WPtFXaTJnGR_bH9YNTzzmNqtG5B5-A,2957 +astropy/utils/compat/numpy/core/multiarray.pyc,, +astropy/utils/compat/numpy/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/compat/numpy/lib/__init__.pyc,, +astropy/utils/compat/numpy/lib/stride_tricks.py,sha256=1OT9jrccnLdPDukurqw0KBpSNNnJASka58T3S5gHxqU,7301 +astropy/utils/compat/numpy/lib/stride_tricks.pyc,, +astropy/utils/compat/numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/compat/numpy/tests/__init__.pyc,, +astropy/utils/compat/numpy/tests/test_broadcast_arrays.py,sha256=H-lgN9TFtHxKN068qu_DvlS664O28p_6MC-yhS8EBv0,1882 +astropy/utils/compat/numpy/tests/test_broadcast_arrays.pyc,, +astropy/utils/compat/numpy/tests/test_matmul.py,sha256=W1Vopukw5EtEDAuUCi-66Py4sJoArYXsW4WVhxmYqdM,1947 +astropy/utils/compat/numpy/tests/test_matmul.pyc,, +astropy/utils/compat/numpycompat.py,sha256=lqLzDuOA-b-77vq7UdDikQLRy8UhyvzQWdqaYN4JuCY,1046 +astropy/utils/compat/numpycompat.pyc,, +astropy/utils/console.py,sha256=OngaP3sAUWL7NRm-wXvad_fBSHVE43R5pNrd_4nwa0o,33325 +astropy/utils/console.pyc,, +astropy/utils/data.py,sha256=AQta2fvhWHLAS4y_S3_5oaW0AAfbmEBQeRP30oeYrxs,52647 +astropy/utils/data.pyc,, +astropy/utils/data_info.py,sha256=v5SrAE_VybR23C8IXs7W_2jMLg01FkDU1NGJLRlr_9c,21713 +astropy/utils/data_info.pyc,, +astropy/utils/decorators.py,sha256=n84zSV0bCzN7_oblUHa2_OMzy98l4C6v6xuxTYYM1D8,41883 +astropy/utils/decorators.pyc,, +astropy/utils/exceptions.py,sha256=z581BnegZwYZgoqURbirqIdFWR5m1Ti-Ip1rnNENNC0,1362 +astropy/utils/exceptions.pyc,, +astropy/utils/iers/__init__.py,sha256=70PwlRXFeNILvhfGtpKI83xg7itSGopm6pIPsUURQVI,130 +astropy/utils/iers/__init__.pyc,, 
+astropy/utils/iers/data/ReadMe.eopc04_IAU2000,sha256=odVASGVkgVh801OWaz7Jq7rULvQEEBBgHb_3XZ0R1kQ,2170 +astropy/utils/iers/data/ReadMe.finals2000A,sha256=fGGCzA_Qy-zjlxH2SNFeSLSRaJJWAuNgpXCcXMyNWhI,3429 +astropy/utils/iers/data/eopc04_IAU2000.62-now,sha256=jxc9mwZNI6FCSF_T_TEiv26RFlaQKhQvYgMiTnn-Km4,3098988 +astropy/utils/iers/iers.py,sha256=1se_QN3d5TNOgSjAbaUD04YKOuRGY8PGbXZU3c6bzmg,28055 +astropy/utils/iers/iers.pyc,, +astropy/utils/iers/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/utils/iers/tests/__init__.pyc,, +astropy/utils/iers/tests/finals2000A-2016-02-30-test,sha256=Tr4de3PTwjVpsAum7rrKvPF5OrQwNeW0wGFaUdAbLFg,34026 +astropy/utils/iers/tests/finals2000A-2016-04-30-test,sha256=b1d5gp7Ds1k5qF7ZcNYitcCHpipwkVgf8V0-ZEoFIpo,33917 +astropy/utils/iers/tests/iers_a_excerpt,sha256=9locNMrUY5FMLu7zRsa5dUeEFdjsgPBh74M2-0Meknc,11280 +astropy/utils/iers/tests/test_iers.py,sha256=1MA9sqaoekucbhuPiF0UuDqRoNqkFjR_wJ78MFASmNI,12313 +astropy/utils/iers/tests/test_iers.pyc,, +astropy/utils/introspection.py,sha256=5KtPC0HvxVnJK_A2vk0rIrcY7hWrbZ9nAxXQ33Jbv18,13633 +astropy/utils/introspection.pyc,, +astropy/utils/metadata.py,sha256=3LRAV-TDekfcnNpmMaA106sVHxCZmHGgyaJpsjJDiS8,14883 +astropy/utils/metadata.pyc,, +astropy/utils/misc.py,sha256=XPsYDlufrAlpQYIKmUwrR7uzc-9A3839gl_5ZRBvVRo,39088 +astropy/utils/misc.pyc,, +astropy/utils/setup_package.py,sha256=tUkJe-SzM3rlN5MhDxWU_RH6sPHeWmYEBcVrS5Xa0RQ,1192 +astropy/utils/setup_package.pyc,, +astropy/utils/state.py,sha256=owPheA87OpO633ZpiXN4q0BB-mrBAQEDzdTFuDfR97s,1936 +astropy/utils/state.pyc,, +astropy/utils/tests/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/utils/tests/__init__.pyc,, +astropy/utils/tests/data/.hidden_file.txt,sha256=zN30ZDXvfZ8y95PuAkGxfL1MvqXYxJh4aY2yTPtQZJE,36 +astropy/utils/tests/data/alias.cfg,sha256=zJojR6EGTRE-BX34gy-HZp18T1pzGBy3hilbPn9WoWM,42 +astropy/utils/tests/data/dataurl/index.html,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/tests/data/dataurl_mirror/index.html,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/tests/data/local.dat,sha256=JGgy4ZeKZPN0uaw9mtMaoYw9CHYlFEDQYfBxoUMHiZM,69 +astropy/utils/tests/data/local.dat.bz2,sha256=F8Kt61rOaPSqSvAphYS1srdeS6jmMHAzZ5qVr1JmIIc,96 +astropy/utils/tests/data/local.dat.gz,sha256=EjQQulF2cfojD46FnXoTK1MwzlJ0R615yUoGdNLXjQM,94 +astropy/utils/tests/data/local.dat.xz,sha256=8qI-OYMGn8BHQJ3HNf0hX7Gz-D0Zne254vljliNKTqU,128 +astropy/utils/tests/data/test_package/__init__.py,sha256=BP4HcZ6lZh6fIkY3T267fKqhdqbd9qXjgcDqe8OrsNI,129 +astropy/utils/tests/data/test_package/__init__.pyc,, +astropy/utils/tests/data/test_package/data/foo.txt,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/tests/data/unicode.txt,sha256=TYSzbz7mzL_3wd0qpH-e1j8ciJBZHg06Jx1nP0APhNk,58 +astropy/utils/tests/data/unicode.txt.bz2,sha256=pgDzi5fWIN_84O5gwEdTVFEyQVHcl8TFbC9atbkC8sQ,89 +astropy/utils/tests/data/unicode.txt.gz,sha256=3cAWZpk5aFMCQ3h9CL6DuC4Kqk4hzXS_7CJ_YSsnVkI,86 +astropy/utils/tests/data/unicode.txt.xz,sha256=pwAYJ-esz_KcFrcK820DvW0MaZfkGANWBeJJvu_n3PA,116 +astropy/utils/tests/test_codegen.py,sha256=ZUAc7wqmuVkFgsJaQhSoaDJGe2In6DWFeqNzxvhO_Cg,1487 +astropy/utils/tests/test_codegen.pyc,, +astropy/utils/tests/test_collections.py,sha256=yRXICa2qC4WeImGQtXBQ-wEKjWte3TZvYycASrv3LC4,756 +astropy/utils/tests/test_collections.pyc,, +astropy/utils/tests/test_console.py,sha256=Vi7iRdhrZ0By4EZJXAWMQsaVBMUj6vdPcsXx97RuwbM,6259 +astropy/utils/tests/test_console.pyc,, 
+astropy/utils/tests/test_data.py,sha256=FtDSmwOIxLYqe8-wkQulWQ56A7CurZKp790sBK419V0,17077 +astropy/utils/tests/test_data.pyc,, +astropy/utils/tests/test_data_info.py,sha256=Tz9Cskf40cHLKPpZ7mrsnHPGMQgoYCR4d0ZnMLGo2lA,1575 +astropy/utils/tests/test_data_info.pyc,, +astropy/utils/tests/test_decorators.py,sha256=7rVJFx8x4FXPvigJrR-KRjf25bSSrl0P589Qsm2xkgA,20693 +astropy/utils/tests/test_decorators.pyc,, +astropy/utils/tests/test_introspection.py,sha256=JzqXd48GNtOAxSzEtutcQK9JN-Pv2vzYOLdavZF5E8E,3963 +astropy/utils/tests/test_introspection.pyc,, +astropy/utils/tests/test_metadata.py,sha256=RJpVYffAyC9mitavnRCNKA3apQlGDfpkvat88BLFqc0,6538 +astropy/utils/tests/test_metadata.pyc,, +astropy/utils/tests/test_misc.py,sha256=NHN1VmfMzUbL7szkJQxGMpFGs1b3-QhZLHPoC8wVQGM,4155 +astropy/utils/tests/test_misc.pyc,, +astropy/utils/tests/test_timer.py,sha256=72ZkWIKDoiawxTtDyRrfW9t6-_xoaH-vhQwWLtpwf4Y,2077 +astropy/utils/tests/test_timer.pyc,, +astropy/utils/tests/test_xml.py,sha256=PdmjWJ4FVCHVLC6T_dIZGXSWuMxSqvF1W5sqkZ6-tWo,3245 +astropy/utils/tests/test_xml.pyc,, +astropy/utils/timer.py,sha256=bZ08id5k-K7vq8z9bhKcWjKei1kbKp3KoXLfopHshpw,10993 +astropy/utils/timer.pyc,, +astropy/utils/xml/__init__.py,sha256=vnxqCkx7tTOfFMhgE7GuShCWxdMidhLZOL13el78R68,109 +astropy/utils/xml/__init__.pyc,, +astropy/utils/xml/_iterparser.so,sha256=1-3ZTdk0o_rJ6cqMhJhgyfaI8jves2TwsN3s6Oz10KA,603016 +astropy/utils/xml/check.py,sha256=on1dZKh9iidd9sqC4XM9XhqojMJw-zdeHhn_C7RMljA,2277 +astropy/utils/xml/check.pyc,, +astropy/utils/xml/iterparser.py,sha256=_jGkttuS6LHMDTLafUjr-mmkaq5fEK-V2gHXHndQUNE,5841 +astropy/utils/xml/iterparser.pyc,, +astropy/utils/xml/setup_package.py,sha256=x6fjNlkKISuceCAw1zoXxo0wvxErIugYs6z9n5D4soE,1624 +astropy/utils/xml/setup_package.pyc,, +astropy/utils/xml/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/utils/xml/tests/__init__.pyc,, +astropy/utils/xml/tests/test_iterparse.py,sha256=rUOFsrmvsOZnt0ST4-7_p3-qnpPuWeYj7tFi6mrcKYw,4963 +astropy/utils/xml/tests/test_iterparse.pyc,, +astropy/utils/xml/unescaper.py,sha256=aZeF4foaTmdOCDUWGJowbT0m6svu2XJRJCbg6gfeb-U,1522 +astropy/utils/xml/unescaper.pyc,, +astropy/utils/xml/validate.py,sha256=u6r3ix_eapcBW49Qm-bRwOeYPGytQ5PzwELylXMQGWU,1710 +astropy/utils/xml/validate.pyc,, +astropy/utils/xml/writer.py,sha256=jBfBknV6GQ1X8xNIYbvpyyRdFV8-BLEPSnp68-JUxoQ,10582 +astropy/utils/xml/writer.pyc,, +astropy/version.py,sha256=Qcabrbvpv1PuVOvjZK52plLz8QVdqY4Y0oNAPfJ588s,538 +astropy/version.pyc,, +astropy/visualization/__init__.py,sha256=E9IF1Vgizf9kYDGrsHKBzAHZqXwq2JD6cIgDCp_gNsM,258 +astropy/visualization/__init__.pyc,, +astropy/visualization/hist.py,sha256=fK9Sf1ONG3N4icyfmq3_gAAY5H_duMc6Z9obeflkLDQ,1917 +astropy/visualization/hist.pyc,, +astropy/visualization/interval.py,sha256=5DjcssWguqpux83ETxqUQ_vG8sfIoZ5eyBpTYhqJYB8,9330 +astropy/visualization/interval.pyc,, +astropy/visualization/lupton_rgb.py,sha256=SO22pG13KjnvlGId5WAobrIOqHYPVtoRCPBggXheAUg,12745 +astropy/visualization/lupton_rgb.pyc,, +astropy/visualization/mpl_normalize.py,sha256=R7ZzlcinxFAFYmpZXY5Du0Fneh2WQxcdpZKUFQwHRXs,8562 +astropy/visualization/mpl_normalize.pyc,, +astropy/visualization/mpl_style.py,sha256=tPmew8Y3lle6fFB_j_MnStU_vc-y_Iq2MmRAAAKpI4Y,3102 +astropy/visualization/mpl_style.pyc,, +astropy/visualization/scripts/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/visualization/scripts/__init__.pyc,, +astropy/visualization/scripts/fits2bitmap.py,sha256=f0Jbvv-M_Q0EIZAsBePm5VtfGf7xsHf3pI9sMxoAzlM,7857 
+astropy/visualization/scripts/fits2bitmap.pyc,, +astropy/visualization/scripts/tests/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/visualization/scripts/tests/__init__.pyc,, +astropy/visualization/scripts/tests/test_fits2bitmap.py,sha256=gbNvdSwg40T_umHHkASXRqee2BJRLxdYuFBFx9-IEXo,2345 +astropy/visualization/scripts/tests/test_fits2bitmap.pyc,, +astropy/visualization/stretch.py,sha256=3wEyM3YMjnIvbdVLeyG9KVqqfmXM6mBUWuXqPYnTqYQ,14345 +astropy/visualization/stretch.pyc,, +astropy/visualization/tests/__init__.py,sha256=ykqVHge2EmIDTMOd96h2DyGHaM_gpp_wKz3K1MlZZic,64 +astropy/visualization/tests/__init__.pyc,, +astropy/visualization/tests/test_histogram.py,sha256=kMIKrG9j1f8bhMEAqM1Z576DFmuyZNTvZmJql4SHck8,1881 +astropy/visualization/tests/test_histogram.pyc,, +astropy/visualization/tests/test_interval.py,sha256=0t49WvQo3G8Rx2gUb_yHSbaBPbOqL1nnzq6q4p6Wc14,4710 +astropy/visualization/tests/test_interval.pyc,, +astropy/visualization/tests/test_lupton_rgb.py,sha256=-7hd-knUtjCNzn0Yxl11pDLhketcM3flXD_Fj1IUrWM,9330 +astropy/visualization/tests/test_lupton_rgb.pyc,, +astropy/visualization/tests/test_norm.py,sha256=-vkcjbrMThOk48hFKcUGFE8PR0KvKJnQ-_TJmJ4hVok,6703 +astropy/visualization/tests/test_norm.pyc,, +astropy/visualization/tests/test_stretch.py,sha256=1nkI-qRdYw8FPMBZkP_BBdGxp-Ar6FPqRb10C_ehgm0,4379 +astropy/visualization/tests/test_stretch.pyc,, +astropy/visualization/tests/test_units.py,sha256=F096TxMsfk5ze1bgbiK73kex_rArFGwikPDk5AxhyEA,1715 +astropy/visualization/tests/test_units.pyc,, +astropy/visualization/transform.py,sha256=gssyYhQpYtF-gNuSYBN2C7w2qKcAXSIhT65Qd6Hik3o,1201 +astropy/visualization/transform.pyc,, +astropy/visualization/units.py,sha256=kbjKCfAvRiH2gUyYPkrdA71yMyFYi7LePSsrwuV8mzo,3071 +astropy/visualization/units.pyc,, +astropy/visualization/wcsaxes/__init__.py,sha256=Pvarzde0H5lYlhH7T0DNokRd_lhQDkAblNcrsNszoic,1155 +astropy/visualization/wcsaxes/__init__.pyc,, +astropy/visualization/wcsaxes/axislabels.py,sha256=RoWoeVfB1n0HG7c8qpuMBrLls4abBE9rBR09S-T6llA,4505 +astropy/visualization/wcsaxes/axislabels.pyc,, +astropy/visualization/wcsaxes/coordinate_helpers.py,sha256=Tl2rXjdUewjJF0UqbQiictVdjWwDgf3TFadq7UIkWXQ,31175 +astropy/visualization/wcsaxes/coordinate_helpers.pyc,, +astropy/visualization/wcsaxes/coordinate_range.py,sha256=gdv8ibVNHOsqYAKVAj0QKW83Bige0_UY3ArlyznFcic,4361 +astropy/visualization/wcsaxes/coordinate_range.pyc,, +astropy/visualization/wcsaxes/coordinates_map.py,sha256=q5SN-NrMEv8ZV5EKy7yfe8Mpo_K3XrR0KO-uNwN2eUI,7453 +astropy/visualization/wcsaxes/coordinates_map.pyc,, +astropy/visualization/wcsaxes/core.py,sha256=4ACdGwXzWsgqAQxa972p26ezuAFhNGgtziVsEsMLC3Y,23632 +astropy/visualization/wcsaxes/core.pyc,, +astropy/visualization/wcsaxes/formatter_locator.py,sha256=jJYpmbbVrpzyeRo86IiDxOVsy8ATxs6Li4jo5ZwtKio,16072 +astropy/visualization/wcsaxes/formatter_locator.pyc,, +astropy/visualization/wcsaxes/frame.py,sha256=NxMZpYF6AuKtvSxxg4tqWkxv7We0e3rkpTuYUn-iXDE,7481 +astropy/visualization/wcsaxes/frame.pyc,, +astropy/visualization/wcsaxes/grid_paths.py,sha256=8YD-ZZ_36BZfOigTU_m4OD7xvWTnkflM1JgUrhFqqB4,3950 +astropy/visualization/wcsaxes/grid_paths.pyc,, +astropy/visualization/wcsaxes/patches.py,sha256=EG9TA-bVHsn20uQejfWrn6UXYhYsQlzYuZL1_fHIb6g,3480 +astropy/visualization/wcsaxes/patches.pyc,, +astropy/visualization/wcsaxes/tests/__init__.py,sha256=SpGd3pAPI_4OxD5AaNXj5er_Mu2stQX-5uxU0ftPu8k,304 +astropy/visualization/wcsaxes/tests/__init__.pyc,, 
+astropy/visualization/wcsaxes/tests/data/2MASS_k_header,sha256=M2DC6I5L8enE69knIBF4PqnCw05NmlKh97uyfO_PkZc,1133 +astropy/visualization/wcsaxes/tests/data/cube_header,sha256=o11AIq-WGRXvIOnuMwwMlxOAsbJEsK1AErQD0UtpQwI,1619 +astropy/visualization/wcsaxes/tests/data/msx_header,sha256=qczPbXvyNl4kWDv28Qt_fnX0pzHTW5ut57prZlFEW0w,1052 +astropy/visualization/wcsaxes/tests/data/rosat_header,sha256=S2k_t9Q9x-549QMb2pJd4hlxWzwAvC_xswDEAzxpdsE,1052 +astropy/visualization/wcsaxes/tests/data/slice_header,sha256=axmEHC5sxgJhIJkJyd02mOJaXAwFjuzUCPLEploNOCE,1123 +astropy/visualization/wcsaxes/tests/datasets.py,sha256=35PPRyPXXFzuX66hfvn3WCDaQxSQHWD1yz3z8hqjnBc,1102 +astropy/visualization/wcsaxes/tests/datasets.pyc,, +astropy/visualization/wcsaxes/tests/setup_package.py,sha256=CYIwO14qdbg-WyALatLich1tuSDTCSBfEi-gpi77Nws,114 +astropy/visualization/wcsaxes/tests/setup_package.pyc,, +astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py,sha256=LH6FvnSOCQgj5aaZ5DX6pZt6TM-5s0SXQmE7MARXfqA,477 +astropy/visualization/wcsaxes/tests/test_coordinate_helpers.pyc,, +astropy/visualization/wcsaxes/tests/test_display_world_coordinates.py,sha256=F7O6i0C3n8HfuTGc1uGAIjoyK-RjsHvNcPu6B0KCzAY,4792 +astropy/visualization/wcsaxes/tests/test_display_world_coordinates.pyc,, +astropy/visualization/wcsaxes/tests/test_formatter_locator.py,sha256=v2_DUk62r31V7ASwC4F27paQT4XbiRppnuO6a9a_2q0,16024 +astropy/visualization/wcsaxes/tests/test_formatter_locator.pyc,, +astropy/visualization/wcsaxes/tests/test_frame.py,sha256=t8y8tXrv5s4YzNsW6tAtIYCNcaXw8GAr152JE7sVIVg,5336 +astropy/visualization/wcsaxes/tests/test_frame.pyc,, +astropy/visualization/wcsaxes/tests/test_images.py,sha256=OWhTzfwrUGw50nG5NZqP8R3MKyMfocaR38dopDUcV_A,23237 +astropy/visualization/wcsaxes/tests/test_images.pyc,, +astropy/visualization/wcsaxes/tests/test_misc.py,sha256=ndvubTIUp6RGNzZ3bn7lbkNZRdD9sA03o4dqGDiG-0E,7271 +astropy/visualization/wcsaxes/tests/test_misc.pyc,, +astropy/visualization/wcsaxes/tests/test_transform_coord_meta.py,sha256=soWc1Zd2LiCtO77qgEG9Aq1vJNpAjbWRdxo2Df57wno,5125 +astropy/visualization/wcsaxes/tests/test_transform_coord_meta.pyc,, +astropy/visualization/wcsaxes/tests/test_transforms.py,sha256=AUyCuOg3ZtPh9EJZFqqD0uZSeh9WY_EO7M3HJHslFEE,1335 +astropy/visualization/wcsaxes/tests/test_transforms.pyc,, +astropy/visualization/wcsaxes/tests/test_utils.py,sha256=fbayPvrTTGHYBdHACJ39qwXEptosSH7gLdVmbXXIVto,4139 +astropy/visualization/wcsaxes/tests/test_utils.pyc,, +astropy/visualization/wcsaxes/ticklabels.py,sha256=CTD7JwgxYEgqKExrTJ49QilThlpH9Gxv9O95xkL33To,7676 +astropy/visualization/wcsaxes/ticklabels.pyc,, +astropy/visualization/wcsaxes/ticks.py,sha256=jCmwsTmIzW9xRT4a-fsbol6XqnIf7WDET3LiUpW_kfw,6157 +astropy/visualization/wcsaxes/ticks.pyc,, +astropy/visualization/wcsaxes/transforms.py,sha256=kCIGQlyiwRyxC0tYQ1TipP54WQEocebwo49NoIe3akw,8513 +astropy/visualization/wcsaxes/transforms.pyc,, +astropy/visualization/wcsaxes/utils.py,sha256=Kq25CA7hbk87IPcUjv4n6lVjeL6dBCqDuNi9cz3NhEM,3951 +astropy/visualization/wcsaxes/utils.pyc,, +astropy/vo/__init__.py,sha256=knB6R7sEszrfBhNOtcgE-QgtPGg7BulTVQ2ihfMDyOM,857 +astropy/vo/__init__.pyc,, +astropy/vo/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/vo/client/__init__.pyc,, +astropy/vo/client/conesearch.py,sha256=IfrPSySZZfYgAetFqj1CW482cM6Ts_XvBVmYz26LDvI,16891 +astropy/vo/client/conesearch.pyc,, +astropy/vo/client/exceptions.py,sha256=DHqhtusCSRtdjeMH34fsZDzGckQUyEACw5hfy9-4r0c,1292 +astropy/vo/client/exceptions.pyc,, 
+astropy/vo/client/setup_package.py,sha256=Gne-71vRwO3h9YVj4AaF6OLjq-kskMqHe2u4c0s8QxU,209 +astropy/vo/client/setup_package.pyc,, +astropy/vo/client/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/vo/client/tests/__init__.pyc,, +astropy/vo/client/tests/data/basic.json,sha256=Ik_3OTxgK8-whO-hljyopa2Ygz7PI-93YUgvuf2xtmw,166 +astropy/vo/client/tests/data/conesearch_error1.xml,sha256=G8UlPZcfLz5Bdmp-iqPWGixgk_mVkXdDd7-kPZYYD1w,251 +astropy/vo/client/tests/data/conesearch_error2.xml,sha256=haYov1OfZ--K851IOzzHG22lCgYZYt6MbaFrIe1Ya8g,300 +astropy/vo/client/tests/data/conesearch_error3.xml,sha256=bJOmiSkMjjaXqMb0N3F2PDnTdkJxLaSdGhUVQATQwBE,339 +astropy/vo/client/tests/data/conesearch_error4.xml,sha256=CWYmumCxwleTVRkfVtOf08BvPHXsjLWzBx-CanJagNQ,310 +astropy/vo/client/tests/test_conesearch.py,sha256=lfSndGIBa5sK3tFP_bxVqmHtpdMBeztUEcxvBrtNpRc,9458 +astropy/vo/client/tests/test_conesearch.pyc,, +astropy/vo/client/tests/test_vos_catalog.py,sha256=lRPVFHK25QN8WU2hgNliEPWV0pNpWfgg706xdCgRzj4,8173 +astropy/vo/client/tests/test_vos_catalog.pyc,, +astropy/vo/client/vo_async.py,sha256=ss0hV5YClOK0tvHXPNiLL5cGrhkmaNfKlhLH9dTtYh4,2518 +astropy/vo/client/vo_async.pyc,, +astropy/vo/client/vos_catalog.py,sha256=LcfTCFE1y8FRYqkPd7xtuLobu7v_eK1C2ngcvnLlW3U,28507 +astropy/vo/client/vos_catalog.pyc,, +astropy/vo/samp/__init__.py,sha256=I5z9DGjCOaEqhylHsxzs_Ed84MaSvxA5qSZ3s6RD28k,226 +astropy/vo/samp/__init__.pyc,, +astropy/vo/validator/__init__.py,sha256=QlIR5mi3lPfCTlCl-KXZrwCtY-Sa7llhZkQ8qrhKYTs,1399 +astropy/vo/validator/__init__.pyc,, +astropy/vo/validator/data/conesearch_urls.txt,sha256=dnzaOD4AW98JCTabUQg6VvpQE6lWwcfo7Drzz8ttYAY,2361 +astropy/vo/validator/exceptions.py,sha256=yhvwpyGdQb6QPzUSdlfm-p8OfubRbnwQweFQZnUR9aE,861 +astropy/vo/validator/exceptions.pyc,, +astropy/vo/validator/inspect.py,sha256=ZNhn9ptxRjUvG2K0KyPAK_y8NLtm61QG5YTbTf6JM-Q,5321 +astropy/vo/validator/inspect.pyc,, +astropy/vo/validator/setup_package.py,sha256=4-QF3u0ZWWBDFRCyfv2vY7p1fZtFoKpLhmzwlSHrjIw,313 +astropy/vo/validator/setup_package.pyc,, +astropy/vo/validator/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/vo/validator/tests/__init__.pyc,, +astropy/vo/validator/tests/data/conesearch_error.json,sha256=fuj7VENgeqYKvS0mOpNqFliQ7hQ3HX8em4R9vlpb7yA,45 +astropy/vo/validator/tests/data/conesearch_exception.json,sha256=fuj7VENgeqYKvS0mOpNqFliQ7hQ3HX8em4R9vlpb7yA,45 +astropy/vo/validator/tests/data/conesearch_good.json,sha256=kXsSRALcojTNBRPG3onx8z9zOUVy_FRXpXOWhujuFN4,3279 +astropy/vo/validator/tests/data/conesearch_warn.json,sha256=fuj7VENgeqYKvS0mOpNqFliQ7hQ3HX8em4R9vlpb7yA,45 +astropy/vo/validator/tests/data/listcats1.out,sha256=Ec0aKMGRpA4xs8a6S0Z3wbGyv5khy6u_SaNOLA8Jisw,245 +astropy/vo/validator/tests/data/listcats2.out,sha256=WuG8Zx3hNQV41z-l2Iny1Gq-kw0IfYeJPXWYq9UDf6o,114 +astropy/vo/validator/tests/data/printcat.out,sha256=ab89f255EEp6SJWyTSzkPuzYKRTEHvGfZDBlPRXwXpk,2866 +astropy/vo/validator/tests/data/tally.out,sha256=NvjmvG7TaZyGJ3pcKdFBLpgKHB13BNpo6SIVA7MZbdk,102 +astropy/vo/validator/tests/data/vao_conesearch_sites_121107_subset.xml,sha256=BfGCx0XpgNQiGb4dV-SoUq2Wzc2bT6IntISOIMjKNbw,4682 +astropy/vo/validator/tests/test_inpect.py,sha256=Njhtbb5IqoopdBBvT7g_WzOr0hLoWoPyAUig7OxfXvk,2841 +astropy/vo/validator/tests/test_inpect.pyc,, +astropy/vo/validator/tests/test_validate.py,sha256=g2bNxtRR0zcx6MLtfR0eYdkLypy8nrkIxt9AhVkq8mA,3074 +astropy/vo/validator/tests/test_validate.pyc,, 
+astropy/vo/validator/tstquery.py,sha256=G_bkz85Y59ktXdBFEEexLm5M_u23YdPoiToZCPa2KF0,3392 +astropy/vo/validator/tstquery.pyc,, +astropy/vo/validator/validate.py,sha256=lBY_1z89Kkaw3lJFxI8RLPNQ59l4gUcx9Mmq--Dxv2g,11373 +astropy/vo/validator/validate.pyc,, +astropy/wcs/__init__.py,sha256=ukWJ5BcS8gwTIFQWLKYf2lkSSx-6Mj_mGgera2iTjcs,1452 +astropy/wcs/__init__.pyc,, +astropy/wcs/_docutil.py,sha256=TiNbt5NPRz7o_76eZn5PhyBVhY13H_58Gq0fuyTMzl8,1779 +astropy/wcs/_docutil.pyc,, +astropy/wcs/_wcs.so,sha256=j-6cP9wRnaKESplkuj4GHFD9vZPFNtpSXxWRly_07lU,2908784 +astropy/wcs/docstrings.py,sha256=SBM66KCUIrMJ9_cZGfQ8ITO7E0J4MjlWiljaJAKaJIw,60905 +astropy/wcs/docstrings.pyc,, +astropy/wcs/include/astropy_wcs/astropy_wcs.h,sha256=ailzl2068NgA_dc3cvuW4qnogXVFhvZrLLUCaLTHq4w,482 +astropy/wcs/include/astropy_wcs/astropy_wcs_api.h,sha256=uTicxF8t-iIDMLJyUNwpzVbU1uOk3f67VaTyMAJYle4,4976 +astropy/wcs/include/astropy_wcs/distortion.h,sha256=w3yaPcOG0v1z75hWNLC4HKLM0jJMxidq_tApSSnlkYI,2656 +astropy/wcs/include/astropy_wcs/isnan.h,sha256=bkK9oBU-4WzupOgzl7EeQ0Eo0GPfaTFdBiNwhqQ9g_A,1182 +astropy/wcs/include/astropy_wcs/pipeline.h,sha256=wrODGeRXwF1B2XP6Z6qyfv0D4mQO_6cdMzW6j2Lwe30,2327 +astropy/wcs/include/astropy_wcs/pyutil.h,sha256=2G4AGrXLRYPWV_clqBqtqrBcVnmeDulynDaNSLda8iI,6805 +astropy/wcs/include/astropy_wcs/sip.h,sha256=gfjMI3J0VSQG9ab4G5OJzoM0lpmOy9hHJxjCZRnKwQ0,4208 +astropy/wcs/include/astropy_wcs/util.h,sha256=Km3k90qZiVdnU9rlzcGDrehQAY_776q1Ky9dYt623NM,538 +astropy/wcs/include/astropy_wcs/wcsconfig.h,sha256=-SYqyozJgU7hwbBC8wvvta_jpD3qcWjbkKqHU6e6HW8,900 +astropy/wcs/include/astropy_wcs_api.h,sha256=itXn8LTNnrjcPX5qpDdJmzBgMbn8868_AtJAsRg0O5k,105 +astropy/wcs/include/wcslib/cel.h,sha256=u33estQ9Fq5x1xDh7vTnMcTBa1GFFSadfWlb_i551Y0,18498 +astropy/wcs/include/wcslib/lin.h,sha256=TbrVS1IQhkQNhV1372Y85wqYJg_fF4_8Pk310wph10o,26261 +astropy/wcs/include/wcslib/prj.h,sha256=PSWN5QJZ_GrSgDFLDpUbl6UqW3zWB7YFqLwhJOik66U,31503 +astropy/wcs/include/wcslib/spc.h,sha256=FBy5oVCNInAJ3fKhajWdLzfeKHTE79mFh2sA7P0nwhA,39203 +astropy/wcs/include/wcslib/spx.h,sha256=oBeS4FdmwEfpcOqAolXHSDkN9gzNErZ52tQhXxgKtxs,20732 +astropy/wcs/include/wcslib/tab.h,sha256=227DQo-kvwWGlY4Bq59HMzl8qXz7Cw1Juv6WSX91O64,25584 +astropy/wcs/include/wcslib/wcs.h,sha256=T90r4AQ0GZ-T_99CvumRVf4dGtjGoFzvl00hGhrA5Rs,72749 +astropy/wcs/include/wcslib/wcserr.h,sha256=eO10UqXT__c6W88FhQPPxreBaDj20D-ctm-KyAW7OO8,9208 +astropy/wcs/include/wcslib/wcsmath.h,sha256=WNHzaGP2DtVk66Fo9rIGFLzFx17_llV1UNATujOwM3c,2044 +astropy/wcs/include/wcslib/wcsprintf.h,sha256=CtRIuDpwEg1X9ychpZdOgo6-3zdmWKeJV316ss87K6o,6102 +astropy/wcs/setup_package.py,sha256=NvddTHbb8y7PxHDDwaelGDmMmVxcl5uY2-n_gdLbzB8,10569 +astropy/wcs/setup_package.pyc,, +astropy/wcs/tests/__init__.py,sha256=4k4dBceP0AEuAPiUv7SF9M3hVvwP02uS257ctpXrTcg,147 +astropy/wcs/tests/__init__.pyc,, +astropy/wcs/tests/data/2wcses.hdr,sha256=wvk-MIKIF9d7x4RZizvENIo4ZKfJ90CW3qP04S9uHNQ,8640 +astropy/wcs/tests/data/3d_cd.hdr,sha256=CRVfYvN6l0hxVwPMKGKPuGkzx7tLUW9ZfT7_bvprjp0,1295 +astropy/wcs/tests/data/defunct_keywords.hdr,sha256=MNEZerZed6KEbxEXzwMUPapjRIgpGhTnymo8nkNcrys,3123 +astropy/wcs/tests/data/dist.fits,sha256=AXIOKR2I_pFzFOHdDCKOHWZMp9mSwAE5k-IBhJUl1bM,23040 +astropy/wcs/tests/data/dist_lookup.fits.gz,sha256=Us3GgwAb5XurTMOOS02YzTgpg9YlVw7--ebeHij3pVU,68483 +astropy/wcs/tests/data/header_newlines.fits,sha256=E1B8WLLO2cj28lHd9D3074isp5W3gnWGIs4lPqaUUwA,37440 +astropy/wcs/tests/data/invalid_header.hdr,sha256=XvZ7IdK9GqYeaiHCIOmcHSD0xyWe48MCFv7wnvlLzmY,28964 
+astropy/wcs/tests/data/irac_sip.hdr,sha256=5c7u4wRh3VCy1nTIySO_LiKtPLPzZbXP7nioKvx93S8,19360 +astropy/wcs/tests/data/j94f05bgq_flt.fits,sha256=kAA44NhTgoFAp1fiZWk0yyaP-fMVxcb2F96FpjKtUms,83520 +astropy/wcs/tests/data/locale.hdr,sha256=0TYfA4-IJd6mtZGSHJFUXSN08TAs8hDZ_BJft-jPCM0,2880 +astropy/wcs/tests/data/nonstandard_units.hdr,sha256=_U78tjNeshdfjfEc4PaMjUbneo7QRpEgm7f2ahNo8DQ,1283 +astropy/wcs/tests/data/outside_sky.hdr,sha256=4QVktMeEy_9Jh7xSvgWNbCoOhnwTr6TOnfXpjOVXUbI,1471 +astropy/wcs/tests/data/sip-broken.hdr,sha256=8CjPzJ7FpE2Q-vsgb-1aNY3CVZ-BQNyxnojq18XMhsE,25920 +astropy/wcs/tests/data/sip.fits,sha256=6va-fWqHSABBBoKDJrX-tiBD9hSabTpssgwjKcnzkdM,5760 +astropy/wcs/tests/data/sip2.fits,sha256=0Sk_2QLWFZlI3d7PlCV1GUj1wH9HLx0_sb7hSO6jiX8,5760 +astropy/wcs/tests/data/siponly.hdr,sha256=rHhqEkk0o80A8RkhxvJke0cPBe8EovQDOWC0CGNaNKk,28800 +astropy/wcs/tests/data/sub-segfault.hdr,sha256=BmklcvQT4PR13x__gCwMWa4BKkKt0j9tXl2tjZipFr8,1986 +astropy/wcs/tests/data/too_many_pv.hdr,sha256=zWNL8mgcgsJeIQpvX7awnLaGdpcKiiHL6EwjBeD2-VA,34560 +astropy/wcs/tests/data/tpvonly.hdr,sha256=yj91PHBPrcevt0ssCFr9RTnElCca-9ISyTR2qSAliZ0,25920 +astropy/wcs/tests/data/unit.hdr,sha256=pnaByaDErnfZt1VCPBIO-d0sHLiiz-9lgPDJ8kIhuYI,2880 +astropy/wcs/tests/data/validate.5.0.txt,sha256=WkeknwqmYxZTPxRjjrqdCbR4prv3QtSFA5Ny3Gmv4RQ,605 +astropy/wcs/tests/data/validate.5.13.txt,sha256=gPVYDz8kG-StlRsnhkDkMff_AKUobxisoQUd7m-EAwI,599 +astropy/wcs/tests/data/validate.fits,sha256=3X-AOOpJ3tonKVBU-j2Q7efiWSRieVU9_nT3tXfPyxg,40320 +astropy/wcs/tests/data/validate.txt,sha256=nTBzSluFaoSz7U0ZyHLSB1W_cicwFKEEFzpHzv2dIqg,595 +astropy/wcs/tests/data/zpn-hole.hdr,sha256=dwOWuhTm8iSrVuie_qlHOO-EiPAwfT4dkzOi0ovjI6w,2880 +astropy/wcs/tests/extension/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +astropy/wcs/tests/extension/__init__.pyc,, +astropy/wcs/tests/extension/setup.py,sha256=6yxvIXJ5KcGch65-Rd7MhbFTsgT9jvePWgY_HU48sO0,1536 +astropy/wcs/tests/extension/setup.pyc,, +astropy/wcs/tests/extension/test_extension.py,sha256=xWFpJkjWRfSS_mGuOTmdl20ZyjCecY6pxwxQQWYV198,2869 +astropy/wcs/tests/extension/test_extension.pyc,, +astropy/wcs/tests/extension/wcsapi_test.c,sha256=9mky4jtAljFbum34WU-OPi2BLUdtcmrecZlfWygiEmw,1189 +astropy/wcs/tests/maps/1904-66_AIR.hdr,sha256=wVTlRZxBg7f6S8nP45PtyXuIvVWkBZ1pUdl8rjOP3sk,9280 +astropy/wcs/tests/maps/1904-66_AIT.hdr,sha256=4-hP422SgFfFOnF_PfVdKF30TgwbcUQG7gfzP6SJqJc,9200 +astropy/wcs/tests/maps/1904-66_ARC.hdr,sha256=UtXrKukkKuFz_zdU9p3Dx-SjkhcXV_6TEA1q2PmKJP8,9200 +astropy/wcs/tests/maps/1904-66_AZP.hdr,sha256=JZp8wSZJAl3WUZB4Nus5LAbxEFbjaVF5mTL9V2-ZrOQ,9360 +astropy/wcs/tests/maps/1904-66_BON.hdr,sha256=CxktHp7y6f5hidS01ojQu5eVAgIYMppKkMagwsQWgsw,9280 +astropy/wcs/tests/maps/1904-66_CAR.hdr,sha256=DPtguXGTmKjfe-9tgunr94jk0FhQklIUpYFtr73gUDY,9200 +astropy/wcs/tests/maps/1904-66_CEA.hdr,sha256=VbB6Z9MOTbuHlgTJu5TDqS-nFTExgSEf8udUKedRS2o,9280 +astropy/wcs/tests/maps/1904-66_COD.hdr,sha256=FvRTYQOS2bdnT05sniiKGusbRMLrcxlX36gCUKMOF2g,9360 +astropy/wcs/tests/maps/1904-66_COE.hdr,sha256=sNl9XKNzd2b1qDWw_iYR-cfHkaiL4ZGuGYv1YUd2wq8,9360 +astropy/wcs/tests/maps/1904-66_COO.hdr,sha256=PqZ4Ycqjb7KADa93McFgt6dju-9eB3cCL5A1l4hto4M,9360 +astropy/wcs/tests/maps/1904-66_COP.hdr,sha256=dtnyecQipwDx9uNwVXfrg2C0CCPBeS8-No1bMlh-os4,9360 +astropy/wcs/tests/maps/1904-66_CSC.hdr,sha256=f0X1nvcsbBFryC_Ns2ia79k-NgOMTBr-HeQmPukWKR8,9200 +astropy/wcs/tests/maps/1904-66_CYP.hdr,sha256=f8LkYfN87pSd3d4JgxVy6uC6El06vPIPLZOOtNfCZWk,9360 
+astropy/wcs/tests/maps/1904-66_HPX.hdr,sha256=UCGYQMNhU4t-Q-Fw7141JHPnAtr1Z7CbWhVMgQl-1oQ,9440 +astropy/wcs/tests/maps/1904-66_MER.hdr,sha256=ZqLQhgcnnOSVxWjewTDxbQVuzbUmJ8MdP-uARoeDkRI,9200 +astropy/wcs/tests/maps/1904-66_MOL.hdr,sha256=U2w6_vbtIzCB7JceAVtobhLKvNf75nRWPwCL2PhZzis,9200 +astropy/wcs/tests/maps/1904-66_NCP.hdr,sha256=mytR_YjrxCHHwyekpXp9zcjfal6d9OdcUcsygXL9zoA,9360 +astropy/wcs/tests/maps/1904-66_PAR.hdr,sha256=jpvyG0q3C6NfVAjLHWkMz4xu7F37oB4LiYznTDjuz1o,9200 +astropy/wcs/tests/maps/1904-66_PCO.hdr,sha256=Ae_Jp7YfO34MiUxNIHRve0pQZlg6bF8dITlsIbIw89A,9200 +astropy/wcs/tests/maps/1904-66_QSC.hdr,sha256=fa4hDxsJUgtEBdrMtxwt7uFUbiVnlK_S1zkjx8PtuME,9200 +astropy/wcs/tests/maps/1904-66_SFL.hdr,sha256=deGT0yl_XNjm-A0sAXrm57eBDvi_4oghHiJR2BlARoY,9200 +astropy/wcs/tests/maps/1904-66_SIN.hdr,sha256=eLQUFg3opJoZb6h8EUXG63IY50-AlKLpGMlLJf3kkWw,9360 +astropy/wcs/tests/maps/1904-66_STG.hdr,sha256=l1Sx5QS82e0MjsDmH2IXO7UV__TduHOJTf7DZ2jstvc,9200 +astropy/wcs/tests/maps/1904-66_SZP.hdr,sha256=_TY5m-c9UdezfJj5nwP1QWV8Q_5AHfmW6r6mgNoeIsQ,9440 +astropy/wcs/tests/maps/1904-66_TAN.hdr,sha256=RA-RxnldIz_t3zXJ9Rkv7pvp0NcWJ-EwvmdRX_4mPms,9200 +astropy/wcs/tests/maps/1904-66_TSC.hdr,sha256=Zxi148pBbp6bMayrJrtDCEGxH6NYNmli6nrDkgfDzXk,9200 +astropy/wcs/tests/maps/1904-66_ZEA.hdr,sha256=_VAlXDZae5oTGGeq8WJRF8C6yAIimz6msjg6SnG3cuw,9200 +astropy/wcs/tests/maps/1904-66_ZPN.hdr,sha256=M07CWMPW_wWZxjwbwa6KwUi2rvyZac5MLNR_YN9uJ-4,10800 +astropy/wcs/tests/spectra/orion-freq-1.hdr,sha256=dpLVpCS9rKTO1Rek30EGTseiMNf9tOTt5sMcmI4yYCA,30400 +astropy/wcs/tests/spectra/orion-freq-4.hdr,sha256=xolKD3Rr8jkBJSpcNS9rZ7Cs8TGgoESKV2rAjp8ThkI,30640 +astropy/wcs/tests/spectra/orion-velo-1.hdr,sha256=aG6xVghXd_ts1cVtT5ep9HUSJfqCM2SqAcUKn18NbuA,29680 +astropy/wcs/tests/spectra/orion-velo-4.hdr,sha256=y9rb2HwCLtTRJ5BRj30WC2yKnOvquvvPl7grsaduU_s,29920 +astropy/wcs/tests/spectra/orion-wave-1.hdr,sha256=lK1uIc8gYL3uP-CNMhM0NXoHZa18pWYCaNcUTKAYmZc,29600 +astropy/wcs/tests/spectra/orion-wave-4.hdr,sha256=beF-BphLGxC69fv_UfBQwdmeRxxtjH4Lk-tVwNTXD_8,29840 +astropy/wcs/tests/test_pickle.py,sha256=AvxYRLo7zi5oBA3cwkC6tBdl6iVkfMixtkQecG1vZ4k,2964 +astropy/wcs/tests/test_pickle.pyc,, +astropy/wcs/tests/test_profiling.py,sha256=FQC98FceLCzT0BBljbhqFjYVLpdjiswEToquqb-8PXA,2564 +astropy/wcs/tests/test_profiling.pyc,, +astropy/wcs/tests/test_utils.py,sha256=3F7nIl8lfD6vM3QvUaY6l_pBy_274RrjR8kxp7I8ekM,16451 +astropy/wcs/tests/test_utils.pyc,, +astropy/wcs/tests/test_wcs.py,sha256=l5xK1zheymjj-23Mqij2GoxC7SbT9kDTMZTCwLB5iPQ,34470 +astropy/wcs/tests/test_wcs.pyc,, +astropy/wcs/tests/test_wcsprm.py,sha256=sKgxvsLiBMqGLNI6EnUjCR7TGBMcmnglJZR3DorTW4I,23582 +astropy/wcs/tests/test_wcsprm.pyc,, +astropy/wcs/utils.py,sha256=RhWRUO7q6QAQKENkExPWjmS2gu2vixrB_jpiX42QVv4,16854 +astropy/wcs/utils.pyc,, +astropy/wcs/wcs.py,sha256=wPRNgJ0Qlj48JnbLsRtx8-TguzIqY1bvwVx-QacTmGo,124445 +astropy/wcs/wcs.pyc,, +astropy/wcs/wcslint.py,sha256=GzyUrGhSFGOCQJHQscb1DpEOQsv8WOfoqASgr008mdw,613 +astropy/wcs/wcslint.pyc,, diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/WHEEL b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c124558c3005dbae0f043491744d4bef502147d6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.32.2) +Root-Is-Purelib: false +Tag: cp27-cp27mu-linux_x86_64 + diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/entry_points.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f5f356932211cc01806a91f6fe1285305104984 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/entry_points.txt @@ -0,0 +1,10 @@ +[console_scripts] +fits2bitmap = astropy.visualization.scripts.fits2bitmap:main +fitscheck = astropy.io.fits.scripts.fitscheck:main +fitsdiff = astropy.io.fits.scripts.fitsdiff:main +fitsheader = astropy.io.fits.scripts.fitsheader:main +fitsinfo = astropy.io.fits.scripts.fitsinfo:main +samp_hub = astropy.samp.hub_script:hub_script +volint = astropy.io.votable.volint:main +wcslint = astropy.wcs.wcslint:main + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/top_level.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae8bed802e00574393a38c17e038fb87ce7bf8e8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy-2.0.9.dist-info/top_level.txt @@ -0,0 +1 @@ +astropy diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/CITATION b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/CITATION new file mode 100644 index 0000000000000000000000000000000000000000..e9b47516a3459cbf03a0d5a275f68893a20f7c75 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/CITATION @@ -0,0 +1,112 @@ +If you use Astropy for work/research presented in a publication (whether +directly, or as a dependency to another package), we recommend and encourage +the following acknowledgment: + + This research made use of Astropy, a community-developed core Python package + for Astronomy (Astropy Collaboration, 2018). + +where (Astropy Collaboration, 2018) is a citation to this paper: + + http://adsabs.harvard.edu/abs/2018AJ....156..123T + +An earlier paper is also available describing the status of the package at +the time of v0.2. If you have used Astropy for a long time, you are +encouraged to acknowledge both papers: + + This research made use of Astropy, a community-developed core Python package + for Astronomy (Astropy Collaboration, 2013, 2018). + +where (Astropy Collaboration, 2013) is a citation to this paper: + + http://adsabs.harvard.edu/abs/2013A%26A...558A..33A + +We encourage you to also include citations to the papers in the main text +wherever appropriate. + + +Recommended BibTeX entries for the above citations are: + +@ARTICLE{2018AJ....156..123T, + author = {{The Astropy Collaboration} and {Price-Whelan}, A.~M. and {Sip{\H o}cz}, B.~M. and + {G{\"u}nther}, H.~M. and {Lim}, P.~L. and {Crawford}, S.~M. and + {Conseil}, S. and {Shupe}, D.~L. and {Craig}, M.~W. and {Dencheva}, N. and + {Ginsburg}, A. and {VanderPlas}, J.~T. and {Bradley}, L.~D. and + {P{\'e}rez-Su{\'a}rez}, D. and {de Val-Borro}, M. and {Paper Contributors}, (. and + {Aldcroft}, T.~L. and {Cruz}, K.~L. and {Robitaille}, T.~P. and + {Tollerud}, E.~J. and {Coordination Committee}, (. and {Ardelean}, C. and + {Babej}, T. and {Bach}, Y.~P. and {Bachetti}, M. and {Bakanov}, A.~V. and + {Bamford}, S.~P. and {Barentsen}, G. and {Barmby}, P. and {Baumbach}, A. and + {Berry}, K.~L. and {Biscani}, F. and {Boquien}, M. and {Bostroem}, K.~A. and + {Bouma}, L.~G. and {Brammer}, G.~B. 
and {Bray}, E.~M. and {Breytenbach}, H. and + {Buddelmeijer}, H. and {Burke}, D.~J. and {Calderone}, G. and + {Cano Rodr{\'{\i}}guez}, J.~L. and {Cara}, M. and {Cardoso}, J.~V.~M. and + {Cheedella}, S. and {Copin}, Y. and {Corrales}, L. and {Crichton}, D. and + {D{\rsquo}Avella}, D. and {Deil}, C. and {Depagne}, {\'E}. and + {Dietrich}, J.~P. and {Donath}, A. and {Droettboom}, M. and + {Earl}, N. and {Erben}, T. and {Fabbro}, S. and {Ferreira}, L.~A. and + {Finethy}, T. and {Fox}, R.~T. and {Garrison}, L.~H. and {Gibbons}, S.~L.~J. and + {Goldstein}, D.~A. and {Gommers}, R. and {Greco}, J.~P. and + {Greenfield}, P. and {Groener}, A.~M. and {Grollier}, F. and + {Hagen}, A. and {Hirst}, P. and {Homeier}, D. and {Horton}, A.~J. and + {Hosseinzadeh}, G. and {Hu}, L. and {Hunkeler}, J.~S. and {Ivezi{\'c}}, {\v Z}. and + {Jain}, A. and {Jenness}, T. and {Kanarek}, G. and {Kendrew}, S. and + {Kern}, N.~S. and {Kerzendorf}, W.~E. and {Khvalko}, A. and + {King}, J. and {Kirkby}, D. and {Kulkarni}, A.~M. and {Kumar}, A. and + {Lee}, A. and {Lenz}, D. and {Littlefair}, S.~P. and {Ma}, Z. and + {Macleod}, D.~M. and {Mastropietro}, M. and {McCully}, C. and + {Montagnac}, S. and {Morris}, B.~M. and {Mueller}, M. and {Mumford}, S.~J. and + {Muna}, D. and {Murphy}, N.~A. and {Nelson}, S. and {Nguyen}, G.~H. and + {Ninan}, J.~P. and {N{\"o}the}, M. and {Ogaz}, S. and {Oh}, S. and + {Parejko}, J.~K. and {Parley}, N. and {Pascual}, S. and {Patil}, R. and + {Patil}, A.~A. and {Plunkett}, A.~L. and {Prochaska}, J.~X. and + {Rastogi}, T. and {Reddy Janga}, V. and {Sabater}, J. and {Sakurikar}, P. and + {Seifert}, M. and {Sherbert}, L.~E. and {Sherwood-Taylor}, H. and + {Shih}, A.~Y. and {Sick}, J. and {Silbiger}, M.~T. and {Singanamalla}, S. and + {Singer}, L.~P. and {Sladen}, P.~H. and {Sooley}, K.~A. and + {Sornarajah}, S. and {Streicher}, O. and {Teuben}, P. and {Thomas}, S.~W. and + {Tremblay}, G.~R. and {Turner}, J.~E.~H. and {Terr{\'o}n}, V. and + {van Kerkwijk}, M.~H. and {de la Vega}, A. and {Watkins}, L.~L. and + {Weaver}, B.~A. and {Whitmore}, J.~B. and {Woillez}, J. and + {Zabalza}, V. and {Contributors}, (.}, + title = "{The Astropy Project: Building an Open-science Project and Status of the v2.0 Core Package}", + journal = {\aj}, +archivePrefix = "arXiv", + eprint = {1801.02634}, + primaryClass = "astro-ph.IM", + keywords = {methods: data analysis, methods: miscellaneous, methods: statistical, reference systems }, + year = 2018, + month = sep, + volume = 156, + eid = {123}, + pages = {123}, + doi = {10.3847/1538-3881/aabc4f}, + adsurl = {http://adsabs.harvard.edu/abs/2018AJ....156..123T}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} + +@ARTICLE{2013A&A...558A..33A, + author = {{Astropy Collaboration} and {Robitaille}, T.~P. and {Tollerud}, E.~J. and + {Greenfield}, P. and {Droettboom}, M. and {Bray}, E. and {Aldcroft}, T. and + {Davis}, M. and {Ginsburg}, A. and {Price-Whelan}, A.~M. and + {Kerzendorf}, W.~E. and {Conley}, A. and {Crighton}, N. and + {Barbary}, K. and {Muna}, D. and {Ferguson}, H. and {Grollier}, F. and + {Parikh}, M.~M. and {Nair}, P.~H. and {Unther}, H.~M. and {Deil}, C. and + {Woillez}, J. and {Conseil}, S. and {Kramer}, R. and {Turner}, J.~E.~H. and + {Singer}, L. and {Fox}, R. and {Weaver}, B.~A. and {Zabalza}, V. and + {Edwards}, Z.~I. and {Azalee Bostroem}, K. and {Burke}, D.~J. and + {Casey}, A.~R. and {Crawford}, S.~M. and {Dencheva}, N. and + {Ely}, J. and {Jenness}, T. and {Labrie}, K. and {Lian Lim}, P. and + {Pierfederici}, F. and {Pontzen}, A. 
and {Ptak}, A. and {Refsdal}, B. and + {Servillat}, M. and {Streicher}, O.}, + title = "{Astropy: A community Python package for astronomy}", + journal = {\aap}, + keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools}, + year = 2013, + month = oct, + volume = 558, + eid = {A33}, + pages = {A33}, + doi = {10.1051/0004-6361/201322068}, + adsurl = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2071026e9b9f3992467f71e3fc8774d467677393 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.py @@ -0,0 +1,341 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astropy is a package intended to contain core functionality and some +common tools needed for performing astronomy and astrophysics research with +Python. It also provides an index for other astronomy packages and tools for +managing them. +""" + +from __future__ import absolute_import + +import sys +import os +from warnings import warn + +if sys.version_info[:2] < (2, 7): + warn("Astropy does not support Python 2.6 (in v1.2 and later)") + + +def _is_astropy_source(path=None): + """ + Returns whether the source for this module is directly in an astropy + source distribution or checkout. + """ + + # If this __init__.py file is in ./astropy/ then import is within a source + # dir .astropy-root is a file distributed with the source, but that should + # not be installed + if path is None: + path = os.path.join(os.path.dirname(__file__), os.pardir) + elif os.path.isfile(path): + path = os.path.dirname(path) + + source_dir = os.path.abspath(path) + return os.path.exists(os.path.join(source_dir, '.astropy-root')) + + +def _is_astropy_setup(): + """ + Returns whether we are currently being imported in the context of running + Astropy's setup.py. + """ + + main_mod = sys.modules.get('__main__') + if not main_mod: + return False + + return (getattr(main_mod, '__file__', False) and + os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and + _is_astropy_source(main_mod.__file__)) + + +# this indicates whether or not we are in astropy's setup.py +try: + _ASTROPY_SETUP_ +except NameError: + from sys import version_info + if version_info[0] >= 3: + import builtins + else: + import __builtin__ as builtins + + # This will set the _ASTROPY_SETUP_ to True by default if + # we are running Astropy's setup.py + builtins._ASTROPY_SETUP_ = _is_astropy_setup() + + +try: + from .version import version as __version__ +except ImportError: + # TODO: Issue a warning using the logging framework + __version__ = '' +try: + from .version import githash as __githash__ +except ImportError: + # TODO: Issue a warning using the logging framework + __githash__ = '' + + +__minimum_numpy_version__ = '1.9.0' + + +# The location of the online documentation for astropy +# This location will normally point to the current released version of astropy +if 'dev' in __version__: + online_docs_root = 'http://docs.astropy.org/en/latest/' +else: + online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__) + + +def _check_numpy(): + """ + Check that Numpy is installed and it is of the minimum version we + require.
+ """ + # Note: We could have used distutils.version for this comparison, + # but it seems like overkill to import distutils at runtime. + requirement_met = False + + try: + import numpy + except ImportError: + pass + else: + from .utils import minversion + requirement_met = minversion(numpy, __minimum_numpy_version__) + + if not requirement_met: + msg = ("Numpy version {0} or later must be installed to use " + "Astropy".format(__minimum_numpy_version__)) + raise ImportError(msg) + + return numpy + + +if not _ASTROPY_SETUP_: + _check_numpy() + + +from . import config as _config + + +class Conf(_config.ConfigNamespace): + """ + Configuration parameters for `astropy`. + """ + + unicode_output = _config.ConfigItem( + False, + 'When True, use Unicode characters when outputting values, and ' + 'displaying widgets at the console.') + use_color = _config.ConfigItem( + sys.platform != 'win32', + 'When True, use ANSI color escape sequences when writing to the console.', + aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR']) + max_lines = _config.ConfigItem( + None, + description='Maximum number of lines in the display of pretty-printed ' + 'objects. If not provided, try to determine automatically from the ' + 'terminal size. Negative numbers mean no limit.', + cfgtype='integer(default=None)', + aliases=['astropy.table.pprint.max_lines']) + max_width = _config.ConfigItem( + None, + description='Maximum number of characters per line in the display of ' + 'pretty-printed objects. If not provided, try to determine ' + 'automatically from the terminal size. Negative numbers mean no ' + 'limit.', + cfgtype='integer(default=None)', + aliases=['astropy.table.pprint.max_width']) + + +conf = Conf() + +# Create the test() function +from .tests.runner import TestRunner +test = TestRunner.make_test_runner_in(__path__[0]) + + +# if we are *not* in setup mode, import the logger and possibly populate the +# configuration file with the defaults +def _initialize_astropy(): + from . import config + + def _rollback_import(message): + log.error(message) + # Now disable exception logging to avoid an annoying error in the + # exception logger before we raise the import error: + _teardown_log() + + # Roll back any astropy sub-modules that have been imported thus + # far + + for key in list(sys.modules): + if key.startswith('astropy.'): + del sys.modules[key] + raise ImportError('astropy') + + try: + from .utils import _compiler + except ImportError: + if _is_astropy_source(): + log.warning('You appear to be trying to import astropy from ' + 'within a source checkout without building the ' + 'extension modules first. Attempting to (re)build ' + 'extension modules:') + + try: + _rebuild_extensions() + except BaseException as exc: + _rollback_import( + 'An error occurred while attempting to rebuild the ' + 'extension modules. Please try manually running ' + '`./setup.py develop` or `./setup.py build_ext ' + '--inplace` to see what the issue was. Extension ' + 'modules must be successfully compiled and importable ' + 'in order to import astropy.') + # Reraise the Exception only in case it wasn't an Exception, + # for example if a "SystemExit" or "KeyboardInterrupt" was + # invoked. + if not isinstance(exc, Exception): + raise + + else: + # Outright broken installation; don't be nice.
+ raise + + # add these here so we only need to cleanup the namespace at the end + config_dir = os.path.dirname(__file__) + + try: + config.configuration.update_default_config(__package__, config_dir) + except config.configuration.ConfigurationDefaultMissingError as e: + wmsg = (e.args[0] + " Cannot install default profile. If you are " + "importing from source, this is expected.") + warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg)) + + +def _rebuild_extensions(): + global __version__ + global __githash__ + + import subprocess + import time + + from .utils.console import Spinner + from .extern.six import next + + devnull = open(os.devnull, 'w') + old_cwd = os.getcwd() + os.chdir(os.path.join(os.path.dirname(__file__), os.pardir)) + try: + sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext', + '--inplace'], stdout=devnull, + stderr=devnull) + with Spinner('Rebuilding extension modules') as spinner: + while sp.poll() is None: + next(spinner) + time.sleep(0.05) + finally: + os.chdir(old_cwd) + devnull.close() + + if sp.returncode != 0: + raise OSError('Running setup.py build_ext --inplace failed ' + 'with error code {0}: try rerunning this command ' + 'manually to check what the error was.'.format( + sp.returncode)) + + # Try re-loading module-level globals from the astropy.version module, + # which may not have existed before this function ran + try: + from .version import version as __version__ + except ImportError: + pass + + try: + from .version import githash as __githash__ + except ImportError: + pass + + +# Set the bibtex entry to the article referenced in CITATION +def _get_bibtex(): + import re + + citation_file = os.path.join(os.path.dirname(__file__), 'CITATION') + + with open(citation_file, 'r') as citation: + refs = re.findall(r'\{[^()]*\}', citation.read()) + if len(refs) == 0: return '' + bibtexreference = "@ARTICLE{0}".format(refs[0]) + return bibtexreference + + +__citation__ = __bibtex__ = _get_bibtex() + +import logging + +# Use the root logger as a dummy log before initilizing Astropy's logger +log = logging.getLogger() + + +if not _ASTROPY_SETUP_: + from .logger import _init_log, _teardown_log + + log = _init_log() + + _initialize_astropy() + + from .utils.misc import find_api_page + + +def online_help(query): + """ + Search the online Astropy documentation for the given query. + Opens the results in the default web browser. Requires an active + Internet connection. + + Parameters + ---------- + query : str + The search query. 
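+
+ Examples
+ --------
+ An illustrative call (not part of the original docstring); the query
+ string is arbitrary, and since the call opens a web browser it is
+ skipped when running doctests:
+
+ >>> from astropy import online_help
+ >>> online_help('convolution')  # doctest: +SKIP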
+ """ + from .extern.six.moves.urllib.parse import urlencode + import webbrowser + + version = __version__ + if 'dev' in version: + version = 'latest' + else: + version = 'v' + version + + url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format( + version, urlencode({'q': query})) + + webbrowser.open(url) + + +__dir_inc__ = ['__version__', '__githash__', '__minimum_numpy_version__', + '__bibtex__', 'test', 'log', 'find_api_page', 'online_help', + 'online_docs_root', 'conf'] + + +from types import ModuleType as __module_type__ +# Clean up top-level namespace--delete everything that isn't in __dir_inc__ +# or is a magic attribute, and that isn't a submodule of this package +for varname in dir(): + if not ((varname.startswith('__') and varname.endswith('__')) or + varname in __dir_inc__ or + (varname[0] != '_' and + isinstance(locals()[varname], __module_type__) and + locals()[varname].__name__.startswith(__name__ + '.'))): + # The last clause in the the above disjunction deserves explanation: + # When using relative imports like ``from .. import config``, the + # ``config`` variable is automatically created in the namespace of + # whatever module ``..`` resolves to (in this case astropy). This + # happens a few times just in the module setup above. This allows + # the cleanup to keep any public submodules of the astropy package + del locals()[varname] + +del varname, __module_type__ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6105b747a96f16e60fd60d225182674ae6238e9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_compiler.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_compiler.so new file mode 100755 index 0000000000000000000000000000000000000000..fc87d0b29fb140e5acffc714147b078b4b8b6cb0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_compiler.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..35bb1ab8fc890966bb60dfa2ec43916960bd2509 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.py @@ -0,0 +1,7 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +try: + # The ERFA wrappers are not guaranteed available at setup time + from .core import * +except ImportError: + if not _ASTROPY_SETUP_: + raise diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ed49bfea6aa106ae79c46e8caf6cff3b4990285 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/_core.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/_core.so new file mode 100755 index 0000000000000000000000000000000000000000..722adb57d4a86088b8a4627a5aba179003a3d9b2 Binary files /dev/null and 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/_core.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.py new file mode 100644 index 0000000000000000000000000000000000000000..d3edeb46938e5faa157979da4f9200385ac02448 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.py @@ -0,0 +1,22631 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# "core.py" is auto-generated by erfa_generator.py from the template +# "core.py.templ". Do *not* edit "core.py" directly, instead edit +# "core.py.templ" and run erfa_generator.py from the source directory to +# update it. + +""" +This module uses the Python/C API to wrap the ERFA library in +numpy-vectorized equivalents. + +..warning:: + This is currently *not* part of the public Astropy API, and may change in + the future. + + +The key idea is that any function can be called with inputs that are arrays, +and the wrappers will automatically vectorize and call the ERFA functions for +each item using broadcasting rules for numpy. So the return values are always +numpy arrays of some sort. + +For ERFA functions that take/return vectors or matrices, the vector/matrix +dimension(s) are always the *last* dimension(s). For example, if you +want to give ten matrices (i.e., the ERFA input type is double[3][3]), +you would pass in a (10, 3, 3) numpy array. If the output of the ERFA +function is scalar, you'll get back a length-10 1D array. + +Note that the C part of these functions are implemented in a separate +module (compiled as ``_core``), derived from the ``core.c`` file. +Splitting the wrappers into separate pure-python and C portions +dramatically reduces compilation time without notably impacting +performance. (See issue [#3063] on the github repository for more +about this.) +""" +from __future__ import absolute_import, division, print_function + +import warnings + +from ..utils.exceptions import AstropyUserWarning + +import numpy +from . 
import _core + +# TODO: remove the above variable and the code using it and make_outputs_scalar +# when numpy < 1.8 is no longer supported + +__all__ = ['ErfaError', 'ErfaWarning', + 'cal2jd', 'epb', 'epb2jd', 'epj', 'epj2jd', 'jd2cal', 'jdcalf', 'ab', 'apcg', 'apcg13', 'apci', 'apci13', 'apco', 'apco13', 'apcs', 'apcs13', 'aper', 'aper13', 'apio', 'apio13', 'atci13', 'atciq', 'atciqn', 'atciqz', 'atco13', 'atic13', 'aticq', 'aticqn', 'atio13', 'atioq', 'atoc13', 'atoi13', 'atoiq', 'ld', 'ldn', 'ldsun', 'pmpx', 'pmsafe', 'pvtob', 'refco', 'epv00', 'plan94', 'fad03', 'fae03', 'faf03', 'faju03', 'fal03', 'falp03', 'fama03', 'fame03', 'fane03', 'faom03', 'fapa03', 'fasa03', 'faur03', 'fave03', 'bi00', 'bp00', 'bp06', 'bpn2xy', 'c2i00a', 'c2i00b', 'c2i06a', 'c2ibpn', 'c2ixy', 'c2ixys', 'c2t00a', 'c2t00b', 'c2t06a', 'c2tcio', 'c2teqx', 'c2tpe', 'c2txy', 'eo06a', 'eors', 'fw2m', 'fw2xy', 'ltp', 'ltpb', 'ltpecl', 'ltpequ', 'num00a', 'num00b', 'num06a', 'numat', 'nut00a', 'nut00b', 'nut06a', 'nut80', 'nutm80', 'obl06', 'obl80', 'p06e', 'pb06', 'pfw06', 'pmat00', 'pmat06', 'pmat76', 'pn00', 'pn00a', 'pn00b', 'pn06', 'pn06a', 'pnm00a', 'pnm00b', 'pnm06a', 'pnm80', 'pom00', 'pr00', 'prec76', 's00', 's00a', 's00b', 's06', 's06a', 'sp00', 'xy06', 'xys00a', 'xys00b', 'xys06a', 'ee00', 'ee00a', 'ee00b', 'ee06a', 'eect00', 'eqeq94', 'era00', 'gmst00', 'gmst06', 'gmst82', 'gst00a', 'gst00b', 'gst06', 'gst06a', 'gst94', 'pvstar', 'starpv', 'fk52h', 'fk5hip', 'fk5hz', 'h2fk5', 'hfk5z', 'starpm', 'eceq06', 'ecm06', 'eqec06', 'lteceq', 'ltecm', 'lteqec', 'g2icrs', 'icrs2g', 'eform', 'gc2gd', 'gc2gde', 'gd2gc', 'gd2gce', 'd2dtf', 'dat', 'dtdb', 'dtf2d', 'taitt', 'taiut1', 'taiutc', 'tcbtdb', 'tcgtt', 'tdbtcb', 'tdbtt', 'tttai', 'tttcg', 'tttdb', 'ttut1', 'ut1tai', 'ut1tt', 'ut1utc', 'utctai', 'utcut1', 'a2af', 'a2tf', 'af2a', 'anp', 'anpm', 'd2tf', 'tf2a', 'tf2d', 'rxp', 'rxpv', 'trxp', 'trxpv', 'c2s', 'p2s', 'pv2s', 's2c', 's2p', 's2pv', + 'DPI', 'D2PI', 'DR2D', 'DD2R', 'DR2AS', 'DAS2R', 'DS2R', 'TURNAS', 'DMAS2R', 'DTY', 'DAYSEC', 'DJY', 'DJC', 'DJM', 'DJ00', 'DJM0', 'DJM00', 'DJM77', 'TTMTAI', 'DAU', 'CMPS', 'AULT', 'DC', 'ELG', 'ELB', 'TDB0', 'SRS', 'WGS84', 'GRS80', 'WGS72', + # TODO: delete the functions below when they can get auto-generated + 'version', 'version_major', 'version_minor', 'version_micro', 'sofa_version', + 'dt_eraASTROM', 'dt_eraLDBODY'] + + +# <---------------------------------Error-handling----------------------------> + +class ErfaError(ValueError): + """ + A class for errors triggered by ERFA functions (status codes < 0) + """ + + +class ErfaWarning(AstropyUserWarning): + """ + A class for warnings triggered by ERFA functions (status codes > 0) + """ + + +STATUS_CODES = {} # populated below before each function that returns an int + +# This is a hard-coded list of status codes that need to be remapped, +# such as to turn errors into warnings. +STATUS_CODES_REMAP = { + 'cal2jd': {-3: 3} +} + + +def check_errwarn(statcodes, func_name): + # Remap any errors into warnings in the STATUS_CODES_REMAP dict. + if func_name in STATUS_CODES_REMAP: + for before, after in STATUS_CODES_REMAP[func_name].items(): + statcodes[statcodes == before] = after + STATUS_CODES[func_name][after] = STATUS_CODES[func_name][before] + + if numpy.any(statcodes<0): + # errors present - only report the errors. 
+ if statcodes.shape: + statcodes = statcodes[statcodes<0] + + errcodes = numpy.unique(statcodes) + + errcounts = dict([(e, numpy.sum(statcodes==e)) for e in errcodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, 'Return code ' + str(e))) for e in errcodes]) + else: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, elsemsg)) for e in errcodes]) + + emsg = ', '.join(['{0} of "{1}"'.format(errcounts[e], errmsgs[e]) for e in errcodes]) + raise ErfaError('ERFA function "' + func_name + '" yielded ' + emsg) + + elif numpy.any(statcodes>0): + #only warnings present + if statcodes.shape: + statcodes = statcodes[statcodes>0] + + warncodes = numpy.unique(statcodes) + + warncounts = dict([(w, numpy.sum(statcodes==w)) for w in warncodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, 'Return code ' + str(w))) for w in warncodes]) + else: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, elsemsg)) for w in warncodes]) + + wmsg = ', '.join(['{0} of "{1}"'.format(warncounts[w], warnmsgs[w]) for w in warncodes]) + warnings.warn('ERFA function "' + func_name + '" yielded ' + wmsg, ErfaWarning) + + +# <-------------------------trailing shape verification-----------------------> + +def check_trailing_shape(arr, shape, name): + try: + if arr.shape[-len(shape):] != shape: + raise Exception() + except: + raise ValueError("{0} must be of trailing dimensions {1}".format(name, shape)) + +# <--------------------------Actual ERFA-wrapping code------------------------> + +dt_eraASTROM = numpy.dtype([('pmt','d'), + ('eb','d',(3,)), + ('eh','d',(3,)), + ('em','d'), + ('v','d',(3,)), + ('bm1','d'), + ('bpn','d',(3,3)), + ('along','d'), + ('phi','d'), + ('xpl','d'), + ('ypl','d'), + ('sphi','d'), + ('cphi','d'), + ('diurab','d'), + ('eral','d'), + ('refa','d'), + ('refb','d')], align=True) + +dt_eraLDBODY = numpy.dtype([('bm','d'), + ('dl','d'), + ('pv','d',(2,3))], align=True) + + + +DPI = (3.141592653589793238462643) +"""Pi""" +D2PI = (6.283185307179586476925287) +"""2Pi""" +DR2D = (57.29577951308232087679815) +"""Radians to degrees""" +DD2R = (1.745329251994329576923691e-2) +"""Degrees to radians""" +DR2AS = (206264.8062470963551564734) +"""Radians to arcseconds""" +DAS2R = (4.848136811095359935899141e-6) +"""Arcseconds to radians""" +DS2R = (7.272205216643039903848712e-5) +"""Seconds of time to radians""" +TURNAS = (1296000.0) +"""Arcseconds in a full circle""" +DMAS2R = (DAS2R / 1e3) +"""Milliarcseconds to radians""" +DTY = (365.242198781) +"""Length of tropical year B1900 (days)""" +DAYSEC = (86400.0) +"""Seconds per day.""" +DJY = (365.25) +"""Days per Julian year""" +DJC = (36525.0) +"""Days per Julian century""" +DJM = (365250.0) +"""Days per Julian millennium""" +DJ00 = (2451545.0) +"""Reference epoch (J2000.0), Julian Date""" +DJM0 = (2400000.5) +"""Julian Date of Modified Julian Date zero""" +DJM00 = (51544.5) +"""Reference epoch (J2000.0), Modified Julian Date""" +DJM77 = (43144.0) +"""1977 Jan 1.0 as MJD""" +TTMTAI = (32.184) +"""TT minus TAI (s)""" +DAU = (149597870.7e3) +"""Astronomical unit (m, IAU 2012)""" +CMPS = 299792458.0 +"""Speed of light (m/s)""" +AULT = (DAU/CMPS) +"""Light time for 1 au (s)""" +DC = (DAYSEC/AULT) +"""Speed of light (au per day)""" +ELG = (6.969290134e-10) +"""L_G = 1 - d(TT)/d(TCG)""" +ELB = (1.550519768e-8) +"""L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0""" +TDB0 = (-6.55e-5) +"""L_B = 1 
- d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0""" +SRS = 1.97412574336e-8 +"""Schwarzschild radius of the Sun (au) = 2 * 1.32712440041e20 / (2.99792458e8)^2 / 1.49597870700e11""" +WGS84 = 1 +"""Reference ellipsoids""" +GRS80 = 2 +"""Reference ellipsoids""" +WGS72 = 3 +"""Reference ellipsoids""" + + +def cal2jd(iy, im, id): + """ + Wrapper for ERFA function ``eraCal2jd``. + + Parameters + ---------- + iy : int array + im : int array + id : int array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C a l 2 j d + - - - - - - - - - - + + Gregorian Calendar to Julian Date. + + Given: + iy,im,id int year, month, day in Gregorian calendar (Note 1) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date for 0 hrs + + Returned (function value): + int status: + 0 = OK + -1 = bad year (Note 3: JD not computed) + -2 = bad month (JD not computed) + -3 = bad day (JD computed) + + Notes: + + 1) The algorithm used is valid from -4800 March 1, but this + implementation rejects dates before -4799 January 1. + + 2) The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + 3) In early eras the conversion is from the "Proleptic Gregorian + Calendar"; no account is taken of the date(s) of adoption of + the Gregorian Calendar, nor is the AD/BC numbering convention + observed. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
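+
+ Examples
+ --------
+ Illustrative usage, not part of the generated wrapper documentation;
+ the expected values follow from the Gregorian-to-JD conversion
+ described above (2018 September 15 at 0h is JD 2458376.5):
+
+ >>> from astropy._erfa import cal2jd
+ >>> djm0, djm = cal2jd(2018, 9, 15)
+ >>> float(djm0), float(djm)
+ (2400000.5, 58376.0)
+ >>> float(djm0 + djm)  # the full Julian Date
+ 2458376.5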
+ + """ + + #Turn all inputs into arrays + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), iy_in, im_in, id_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [iy_in, im_in, id_in, djm0_out, djm_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._cal2jd(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'cal2jd') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out +STATUS_CODES['cal2jd'] = {0: 'OK', -1: 'bad year (Note 3: JD not computed)', -2: 'bad month (JD not computed)', -3: 'bad day (JD computed)'} + + + +def epb(dj1, dj2): + """ + Wrapper for ERFA function ``eraEpb``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a E p b + - - - - - - - + + Julian Date to Besselian Epoch. + + Given: + dj1,dj2 double Julian Date (see note) + + Returned (function value): + double Besselian Epoch. + + Note: + + The Julian Date is supplied in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding dj1 and + dj2. The maximum resolution is achieved if dj1 is 2451545.0 + (J2000.0). + + Reference: + + Lieske, J.H., 1979. Astron.Astrophys., 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def epb2jd(epb): + """ + Wrapper for ERFA function ``eraEpb2jd``. + + Parameters + ---------- + epb : double array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E p b 2 j d + - - - - - - - - - - + + Besselian Epoch to Julian Date. + + Given: + epb double Besselian Epoch (e.g. 1957.3) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date + + Note: + + The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epb_in = numpy.array(epb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epb_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epb_in, djm0_out, djm_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epb2jd(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out + + +def epj(dj1, dj2): + """ + Wrapper for ERFA function ``eraEpj``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a E p j + - - - - - - - + + Julian Date to Julian Epoch. 
+ + Given: + dj1,dj2 double Julian Date (see note) + + Returned (function value): + double Julian Epoch + + Note: + + The Julian Date is supplied in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding dj1 and + dj2. The maximum resolution is achieved if dj1 is 2451545.0 + (J2000.0). + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epj(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def epj2jd(epj): + """ + Wrapper for ERFA function ``eraEpj2jd``. + + Parameters + ---------- + epj : double array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E p j 2 j d + - - - - - - - - - - + + Julian Epoch to Julian Date. + + Given: + epj double Julian Epoch (e.g. 1996.8) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date + + Note: + + The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
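+
+ Examples
+ --------
+ Illustrative usage, not part of the generated wrapper documentation;
+ J2000.0 corresponds to MJD 51544.5, the DJM00 constant defined in
+ this module:
+
+ >>> from astropy._erfa import epj2jd
+ >>> djm0, djm = epj2jd(2000.0)
+ >>> float(djm0), float(djm)
+ (2400000.5, 51544.5)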
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, djm0_out, djm_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epj2jd(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out + + +def jd2cal(dj1, dj2): + """ + Wrapper for ERFA function ``eraJd2cal``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + iy : int array + im : int array + id : int array + fd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a J d 2 c a l + - - - - - - - - - - + + Julian Date to Gregorian year, month, day, and fraction of a day. + + Given: + dj1,dj2 double Julian Date (Notes 1, 2) + + Returned (arguments): + iy int year + im int month + id int day + fd double fraction of day + + Returned (function value): + int status: + 0 = OK + -1 = unacceptable date (Note 1) + + Notes: + + 1) The earliest valid date is -68569.5 (-4900 March 1). The + largest value accepted is 1e9. + + 2) The Julian Date is apportioned in any convenient way between + the arguments dj1 and dj2. For example, JD=2450123.7 could + be expressed in any of these ways, among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + 3) In early eras the conversion is from the "proleptic Gregorian + calendar"; no account is taken of the date(s) of adoption of + the Gregorian calendar, nor is the AD/BC numbering convention + observed. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + iy_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + im_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + id_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + fd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, iy_out, im_out, id_out, fd_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._jd2cal(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'jd2cal') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iy_out.shape) > 0 and iy_out.shape[0] == 1 + iy_out = iy_out.reshape(iy_out.shape[1:]) + assert len(im_out.shape) > 0 and im_out.shape[0] == 1 + im_out = im_out.reshape(im_out.shape[1:]) + assert len(id_out.shape) > 0 and id_out.shape[0] == 1 + id_out = id_out.reshape(id_out.shape[1:]) + assert len(fd_out.shape) > 0 and fd_out.shape[0] == 1 + fd_out = fd_out.reshape(fd_out.shape[1:]) + + return iy_out, im_out, id_out, fd_out +STATUS_CODES['jd2cal'] = {0: 'OK', -1: 'unacceptable date (Note 1)'} + + + +def jdcalf(ndp, dj1, dj2): + """ + Wrapper for ERFA function ``eraJdcalf``. + + Parameters + ---------- + ndp : int array + dj1 : double array + dj2 : double array + + Returns + ------- + iymdf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a J d c a l f + - - - - - - - - - - + + Julian Date to Gregorian Calendar, expressed in a form convenient + for formatting messages: rounded to a specified precision. + + Given: + ndp int number of decimal places of days in fraction + dj1,dj2 double dj1+dj2 = Julian Date (Note 1) + + Returned: + iymdf int[4] year, month, day, fraction in Gregorian + calendar + + Returned (function value): + int status: + -1 = date out of range + 0 = OK + +1 = NDP not 0-9 (interpreted as 0) + + Notes: + + 1) The Julian Date is apportioned in any convenient way between + the arguments dj1 and dj2. For example, JD=2450123.7 could + be expressed in any of these ways, among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + 2) In early eras the conversion is from the "Proleptic Gregorian + Calendar"; no account is taken of the date(s) of adoption of + the Gregorian Calendar, nor is the AD/BC numbering convention + observed. + + 3) Refer to the function eraJd2cal. + + 4) NDP should be 4 or less if internal overflows are to be + avoided on machines which use 16-bit integers. + + Called: + eraJd2cal JD to Gregorian calendar + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. 
+ Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, dj1_in, dj2_in) + iymdf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, dj1_in, dj2_in, iymdf_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._jdcalf(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'jdcalf') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iymdf_out.shape) > 0 and iymdf_out.shape[0] == 1 + iymdf_out = iymdf_out.reshape(iymdf_out.shape[1:]) + + return iymdf_out +STATUS_CODES['jdcalf'] = {-1: 'date out of range', 0: 'OK', 1: 'NDP not 0-9 (interpreted as 0)'} + + + +def ab(pnat, v, s, bm1): + """ + Wrapper for ERFA function ``eraAb``. + + Parameters + ---------- + pnat : double array + v : double array + s : double array + bm1 : double array + + Returns + ------- + ppr : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - + e r a A b + - - - - - - + + Apply aberration to transform natural direction into proper + direction. + + Given: + pnat double[3] natural direction to the source (unit vector) + v double[3] observer barycentric velocity in units of c + s double distance between the Sun and the observer (au) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + + Returned: + ppr double[3] proper direction to source (unit vector) + + Notes: + + 1) The algorithm is based on Expr. (7.40) in the Explanatory + Supplement (Urban & Seidelmann 2013), but with the following + changes: + + o Rigorous rather than approximate normalization is applied. + + o The gravitational potential term from Expr. (7) in + Klioner (2003) is added, taking into account only the Sun's + contribution. This has a maximum effect of about + 0.4 microarcsecond. + + 2) In almost all cases, the maximum accuracy will be limited by the + supplied velocity. For example, if the ERFA eraEpv00 function is + used, errors of up to 5 microarcseconds could occur. + + References: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013). + + Klioner, Sergei A., "A practical relativistic model for micro- + arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003). + + Called: + eraPdp scalar product of two p-vectors + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
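+
+ Examples
+ --------
+ Illustrative usage, not part of the generated wrapper documentation.
+ With zero observer velocity the aberration correction vanishes, so the
+ proper direction equals the natural direction (s and bm1 are given
+ arbitrary but physically sensible values):
+
+ >>> import numpy as np
+ >>> from astropy._erfa import ab
+ >>> pnat = np.array([1.0, 0.0, 0.0])  # unit vector towards the source
+ >>> ppr = ab(pnat, np.zeros(3), 1.0, 1.0)
+ >>> np.allclose(ppr, pnat)
+ True
+
+ Vector arguments use the trailing axis, so passing directions of shape
+ (n, 3) together with scalar s and bm1 returns an (n, 3) array of results.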
+ + """ + + #Turn all inputs into arrays + pnat_in = numpy.array(pnat, dtype=numpy.double, order="C", copy=False, subok=True) + v_in = numpy.array(v, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + bm1_in = numpy.array(bm1, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pnat_in, (3,), "pnat") + check_trailing_shape(v_in, (3,), "v") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pnat_in[...,0], v_in[...,0], s_in, bm1_in) + ppr_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pnat_in[...,0], v_in[...,0], s_in, bm1_in, ppr_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ab(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ppr_out.shape) > 0 and ppr_out.shape[0] == 1 + ppr_out = ppr_out.reshape(ppr_out.shape[1:]) + + return ppr_out + + +def apcg(date1, date2, ebpv, ehp): + """ + Wrapper for ERFA function ``eraApcg``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c g + - - - - - - - - + + For a geocentric observer, prepare star-independent astrometry + parameters for transformations between ICRS and GCRS coordinates. + The Earth ephemeris is supplied by the caller. + + The parameters produced by this function are required in the + parallax, light deflection and aberration parts of the astrometric + transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric pos/vel (au, au/day) + ehp double[3] Earth heliocentric position (au) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. 
The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 4) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraApcs astrometry parameters, ICRS-GCRS, space observer + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcg(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apcg13(date1, date2): + """ + Wrapper for ERFA function ``eraApcg13``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a A p c g 1 3 + - - - - - - - - - - + + For a geocentric observer, prepare star-independent astrometry + parameters for transformations between ICRS and GCRS coordinates. + The caller supplies the date, and ERFA models are used to predict + the Earth ephemeris. + + The parameters produced by this function are required in the + parallax, light deflection and aberration parts of the astrometric + transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller wishes to supply his own Earth + ephemeris, the function eraApcg can be used instead of the present + function. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. 
+ + Called: + eraEpv00 Earth position and velocity + eraApcg astrometry parameters, ICRS-GCRS, geocenter + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcg13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apci(date1, date2, ebpv, ehp, x, y, s): + """ + Wrapper for ERFA function ``eraApci``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + x : double array + y : double array + s : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c i + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and geocentric CIRS + coordinates. The Earth ephemeris and CIP/CIO are supplied by the + caller. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric position/velocity (au, au/day) + ehp double[3] Earth heliocentric position (au) + x,y double CIP X,Y (components of unit vector) + s double the CIO locator s (radians) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller does not wish to provide the Earth + ephemeris and CIP/CIO, the function eraApci13 can be used instead + of the present function. This computes the required quantities + using other ERFA functions. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraApcg astrometry parameters, ICRS-GCRS, geocenter + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
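+
+ Examples
+ --------
+ Illustrative usage, not part of the generated wrapper documentation.
+ The ephemeris values are rough, made-up numbers of the right order of
+ magnitude (positions in au, velocity in au/day), intended only to show
+ the expected array shapes and the structured return value:
+
+ >>> import numpy as np
+ >>> from astropy._erfa import apci
+ >>> ebpv = np.array([[0.90, -0.40, -0.17],       # barycentric position
+ ...                  [0.0073, 0.0141, 0.0061]])  # barycentric velocity
+ >>> ehp = np.array([0.90, -0.40, -0.17])         # heliocentric position
+ >>> astrom = apci(2458376.5, 0.0, ebpv, ehp, 0.0, 0.0, 0.0)
+ >>> astrom['eb'].shape, astrom['bpn'].shape
+ ((3,), (3, 3))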
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apci(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apci13(date1, date2): + """ + Wrapper for ERFA function ``eraApci13``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + astrom : eraASTROM array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c i 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and geocentric CIRS + coordinates. The caller supplies the date, and ERFA models are used + to predict the Earth ephemeris and CIP/CIO. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + eo double* equation of the origins (ERA-GST) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller wishes to supply his own Earth + ephemeris and CIP/CIO, the function eraApci can be used instead + of the present function. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraEpv00 Earth position and velocity + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraApci astrometry parameters, ICRS-CIRS + eraEors equation of the origins, given NPB matrix and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
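+
+    Examples
+    --------
+    A minimal usage sketch, not part of the ERFA documentation above; the
+    dates are arbitrary and the import name ``erfa`` is a hypothetical
+    placeholder for this module.  Scalars and arrays can be mixed freely,
+    since the wrapper broadcasts its inputs.
+
+    >>> import numpy as np
+    >>> import erfa  # hypothetical import path for this module
+    >>> astrom, eo = erfa.apci13(2456165.5, 0.401182685)    # scalar date
+    >>> em = astrom['em']   # astrom is a structured array; field names follow the eraASTROM members above
+    >>> astrom, eo = erfa.apci13(2456165.5 + np.arange(3.0), 0.0)   # broadcasts to shape (3,)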
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, astrom_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apci13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return astrom_out, eo_out + + +def apco(date1, date2, ebpv, ehp, x, y, s, theta, elong, phi, hm, xp, yp, sp, refa, refb): + """ + Wrapper for ERFA function ``eraApco``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + x : double array + y : double array + s : double array + theta : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + sp : double array + refa : double array + refb : double array + + Returns + ------- + refa : double array + refb : double array + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c o + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and observed + coordinates. The caller supplies the Earth ephemeris, the Earth + rotation information and the refraction constants as well as the + site coordinates. + + Given: + date1 double TDB as a 2-part... 
+ date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric PV (au, au/day, Note 2) + ehp double[3] Earth heliocentric P (au, Note 2) + x,y double CIP X,Y (components of unit vector) + s double the CIO locator s (radians) + theta double Earth rotation angle (radians) + elong double longitude (radians, east +ve, Note 3) + phi double latitude (geodetic, radians, Note 3) + hm double height above ellipsoid (m, geodetic, Note 3) + xp,yp double polar motion coordinates (radians, Note 4) + sp double the TIO locator s' (radians, Note 4) + refa double refraction constant A (radians, Note 5) + refb double refraction constant B (radians, Note 5) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) The vectors eb, eh, and all the astrom vectors, are with respect + to BCRS axes. + + 3) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN + CONVENTION: the longitude required by the present function is + right-handed, i.e. east-positive, in accordance with geographical + convention. + + 4) xp and yp are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions), measured along the + meridians 0 and 90 deg west respectively. sp is the TIO locator + s', in radians, which positions the Terrestrial Intermediate + Origin on the equator. For many applications, xp, yp and + (especially) sp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto the + local meridian. + + 5) The refraction constants refa and refb are for use in a + dZ = A*tan(Z)+B*tan^3(Z) model, where Z is the observed + (i.e. refracted) zenith distance and dZ is the amount of + refraction. 
+ + 6) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 7) In cases where the caller does not wish to provide the Earth + Ephemeris, the Earth rotation information and refraction + constants, the function eraApco13 can be used instead of the + present function. This starts from UTC and weather readings etc. + and computes suitable values using other ERFA functions. + + 8) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 9) The context structure astrom produced by this function is used by + eraAtioq, eraAtoiq, eraAtciq* and eraAticq*. + + Called: + eraAper astrometry parameters: update ERA + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + eraPvtob position/velocity of terrestrial station + eraTrxpv product of transpose of r-matrix and pv-vector + eraApcs astrometry parameters, ICRS-GCRS, space observer + eraCr copy r-matrix + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
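+
+    Examples
+    --------
+    An illustrative sketch of the call signature only, not part of the ERFA
+    documentation above: the zero vectors and numerical values below are
+    arbitrary placeholders, and the import name ``erfa`` is a hypothetical
+    placeholder for this module.  In practice the Earth ephemeris, CIP/CIO,
+    Earth rotation angle and refraction constants would come from the
+    functions named in the "Called" list, or ``apco13`` would be used
+    instead (Note 7).
+
+    >>> import numpy as np
+    >>> import erfa  # hypothetical import path for this module
+    >>> ebpv, ehp = np.zeros((2, 3)), np.zeros(3)   # placeholder Earth ephemeris
+    >>> refa, refb, astrom = erfa.apco(
+    ...     2456384.5, 0.970031644,       # TDB 2-part JD
+    ...     ebpv, ehp,                    # Earth barycentric PV, heliocentric P
+    ...     0.0, 0.0, 0.0,                # CIP X, Y and CIO locator s
+    ...     1.754,                        # Earth rotation angle theta (rad)
+    ...     -0.5278, -1.2346, 2738.0,     # east longitude, latitude (rad), height (m)
+    ...     0.0, 0.0, 0.0,                # polar motion xp, yp and TIO locator s'
+    ...     0.0002, -2.4e-07)             # refraction constants A, B (rad)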
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + refa_in = numpy.array(refa, dtype=numpy.double, order="C", copy=False, subok=True) + refb_in = numpy.array(refb, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, refa_in, refb_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(refa_out, refa_in) + numpy.copyto(refb_out, refb_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, refa_out, refb_out, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*14 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apco(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return refa_out, refb_out, astrom_out + + +def apco13(utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraApco13``. 
+ + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + astrom : eraASTROM array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c o 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and observed + coordinates. The caller supplies UTC, site coordinates, ambient air + conditions and observing wavelength, and ERFA models are used to + obtain the Earth ephemeris, CIP/CIO and refraction constants. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the ICRS/CIRS transformations. + + Given: + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds, Note 3) + elong double longitude (radians, east +ve, Note 4) + phi double latitude (geodetic, radians, Note 4) + hm double height above ellipsoid (m, geodetic, Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + eo double* equation of the origins (ERA-GST) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. 
It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto + the local meridian. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 7) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 8) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 9) In cases where the caller wishes to supply his own Earth + ephemeris, Earth rotation information and refraction constants, + the function eraApco can be used instead of the present function. + + 10) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 11) The context structure astrom produced by this function is used + by eraAtioq, eraAtoiq, eraAtciq* and eraAticq*. 
+ + Called: + eraUtctai UTC to TAI + eraTaitt TAI to TT + eraUtcut1 UTC to UT1 + eraEpv00 Earth position and velocity + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraRefco refraction constants for given ambient conditions + eraApco astrometry parameters, ICRS-observed + eraEors equation of the origins, given NPB matrix and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, astrom_out, eo_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*12 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apco13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'apco13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return astrom_out, eo_out +STATUS_CODES['apco13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def apcs(date1, date2, pv, ebpv, ehp): + """ + Wrapper for ERFA function ``eraApcs``. + + Parameters + ---------- + date1 : double array + date2 : double array + pv : double array + ebpv : double array + ehp : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - + e r a A p c s + - - - - - - - - + + For an observer whose geocentric position and velocity are known, + prepare star-independent astrometry parameters for transformations + between ICRS and GCRS. The Earth ephemeris is supplied by the + caller. + + The parameters produced by this function are required in the space + motion, parallax, light deflection and aberration parts of the + astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + pv double[2][3] observer's geocentric pos/vel (m, m/s) + ebpv double[2][3] Earth barycentric PV (au, au/day) + ehp double[3] Earth heliocentric P (au) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) Providing separate arguments for (i) the observer's geocentric + position and velocity and (ii) the Earth ephemeris is done for + convenience in the geocentric, terrestrial and Earth orbit cases. + For deep space applications it maybe more convenient to specify + zero geocentric position and velocity and to supply the + observer's position and velocity information directly instead of + with respect to the Earth. However, note the different units: + m and m/s for the geocentric vectors, au and au/day for the + heliocentric and barycentric vectors. + + 4) In cases where the caller does not wish to provide the Earth + ephemeris, the function eraApcs13 can be used instead of the + present function. This computes the Earth ephemeris using the + ERFA function eraEpv00. + + 5) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. 
+ + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 6) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraCp copy p-vector + eraPm modulus of p-vector + eraPn decompose p-vector into modulus and direction + eraIr initialize r-matrix to identity + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, pv_in[...,0,0], ebpv_in[...,0,0], ehp_in[...,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, pv_in[...,0,0], ebpv_in[...,0,0], ehp_in[...,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcs(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apcs13(date1, date2, pv): + """ + Wrapper for ERFA function ``eraApcs13``. + + Parameters + ---------- + date1 : double array + date2 : double array + pv : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c s 1 3 + - - - - - - - - - - + + For an observer whose geocentric position and velocity are known, + prepare star-independent astrometry parameters for transformations + between ICRS and GCRS. The Earth ephemeris is from ERFA models. 
+ + The parameters produced by this function are required in the space + motion, parallax, light deflection and aberration parts of the + astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + pv double[2][3] observer's geocentric pos/vel (Note 3) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) The observer's position and velocity pv are geocentric but with + respect to BCRS axes, and in units of m and m/s. No assumptions + are made about proximity to the Earth, and the function can be + used for deep space applications as well as Earth orbit and + terrestrial. + + 4) In cases where the caller wishes to supply his own Earth + ephemeris, the function eraApcs can be used instead of the present + function. + + 5) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. 
+ + 6) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraEpv00 Earth position and velocity + eraApcs astrometry parameters, ICRS-GCRS, space observer + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, pv_in[...,0,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, pv_in[...,0,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcs13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def aper(theta, astrom): + """ + Wrapper for ERFA function ``eraAper``. + + Parameters + ---------- + theta : double array + astrom : eraASTROM array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p e r + - - - - - - - - + + In the star-independent astrometry parameters, update only the + Earth rotation angle, supplied by the caller explicitly. + + Given: + theta double Earth rotation angle (radians, Note 2) + astrom eraASTROM* star-independent astrometry parameters: + pmt double not used + eb double[3] not used + eh double[3] not used + em double not used + v double[3] not used + bm1 double not used + bpn double[3][3] not used + along double longitude + s' (radians) + xpl double not used + ypl double not used + sphi double not used + cphi double not used + diurab double not used + eral double not used + refa double not used + refb double not used + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double "local" Earth rotation angle (radians) + refa double unchanged + refb double unchanged + + Notes: + + 1) This function exists to enable sidereal-tracking applications to + avoid wasteful recomputation of the bulk of the astrometry + parameters: only the Earth rotation is updated. + + 2) For targets expressed as equinox based positions, such as + classical geocentric apparent (RA,Dec), the supplied theta can be + Greenwich apparent sidereal time rather than Earth rotation + angle. 
+ + 3) The function eraAper13 can be used instead of the present + function, and starts from UT1 rather than ERA itself. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, astrom_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(astrom_out, astrom_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aper(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def aper13(ut11, ut12, astrom): + """ + Wrapper for ERFA function ``eraAper13``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + astrom : eraASTROM array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p e r 1 3 + - - - - - - - - - - + + In the star-independent astrometry parameters, update only the + Earth rotation angle. The caller provides UT1, (n.b. not UTC). + + Given: + ut11 double UT1 as a 2-part... 
+ ut12 double ...Julian Date (Note 1) + astrom eraASTROM* star-independent astrometry parameters: + pmt double not used + eb double[3] not used + eh double[3] not used + em double not used + v double[3] not used + bm1 double not used + bpn double[3][3] not used + along double longitude + s' (radians) + xpl double not used + ypl double not used + sphi double not used + cphi double not used + diurab double not used + eral double not used + refa double not used + refb double not used + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double "local" Earth rotation angle (radians) + refa double unchanged + refb double unchanged + + Notes: + + 1) The UT1 date (n.b. not UTC) ut11+ut12 is a Julian Date, + apportioned in any convenient way between the arguments ut11 and + ut12. For example, JD(UT1)=2450123.7 could be expressed in any + of these ways, among others: + + ut11 ut12 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum precision is + delivered when the ut11 argument is for 0hrs UT1 on the day in + question and the ut12 argument lies in the range 0 to 1, or vice + versa. + + 2) If the caller wishes to provide the Earth rotation angle itself, + the function eraAper can be used instead. One use of this + technique is to substitute Greenwich apparent sidereal time and + thereby to support equinox based transformations directly. + + 3) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + Called: + eraAper astrometry parameters: update ERA + eraEra00 Earth rotation angle, IAU 2000 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
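+
+    Examples
+    --------
+    A minimal usage sketch of the "compute once, then update only the Earth
+    rotation angle as time advances" pattern described in the notes above.
+    It is not part of the ERFA documentation, the dates are arbitrary, and
+    the import name ``erfa`` is a hypothetical placeholder for this module.
+
+    >>> import erfa  # hypothetical import path for this module
+    >>> tdb1, tdb2 = 2456165.5, 0.401182685       # TDB for the bulk of the parameters
+    >>> astrom, eo = erfa.apci13(tdb1, tdb2)      # star-independent parameters
+    >>> ut11, ut12 = 2456165.5, 0.401             # later epoch, UT1 (n.b. not UTC)
+    >>> astrom = erfa.aper13(ut11, ut12, astrom)  # refresh only the Earth rotation angle (eral)
+    >>> # alternatively, supply Greenwich apparent sidereal time directly (see eraAper Note 2):
+    >>> astrom = erfa.aper(1.754, astrom)         # theta in radians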
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, astrom_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(astrom_out, astrom_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aper13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apio(sp, theta, elong, phi, hm, xp, yp, refa, refb): + """ + Wrapper for ERFA function ``eraApio``. + + Parameters + ---------- + sp : double array + theta : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + refa : double array + refb : double array + + Returns + ------- + refa : double array + refb : double array + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p i o + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between CIRS and observed + coordinates. The caller supplies the Earth orientation information + and the refraction constants as well as the site coordinates. + + Given: + sp double the TIO locator s' (radians, Note 1) + theta double Earth rotation angle (radians) + elong double longitude (radians, east +ve, Note 2) + phi double geodetic latitude (radians, Note 2) + hm double height above ellipsoid (m, geodetic Note 2) + xp,yp double polar motion coordinates (radians, Note 3) + refa double refraction constant A (radians, Note 4) + refb double refraction constant B (radians, Note 4) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Notes: + + 1) sp, the TIO locator s', is a tiny quantity needed only by the + most precise applications. It can either be set to zero or + predicted using the ERFA function eraSp00. + + 2) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. 
right-handed), in accordance with geographical convention. + + 3) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many applications, + xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto the + local meridian. + + 4) The refraction constants refa and refb are for use in a + dZ = A*tan(Z)+B*tan^3(Z) model, where Z is the observed + (i.e. refracted) zenith distance and dZ is the amount of + refraction. + + 5) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 6) In cases where the caller does not wish to provide the Earth + rotation information and refraction constants, the function + eraApio13 can be used instead of the present function. This + starts from UTC and weather readings etc. and computes suitable + values using other ERFA functions. + + 7) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 8) The context structure astrom produced by this function is used by + eraAtioq and eraAtoiq. + + Called: + eraPvtob position/velocity of terrestrial station + eraAper astrometry parameters: update ERA + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
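+
+    Examples
+    --------
+    An illustrative sketch of the call signature only, not part of the ERFA
+    documentation above; the numerical values are arbitrary placeholders and
+    the import name ``erfa`` is a hypothetical placeholder for this module.
+    In practice refa and refb would come from eraRefco, or ``apio13`` would
+    be used instead (Note 6).
+
+    >>> import erfa  # hypothetical import path for this module
+    >>> refa, refb, astrom = erfa.apio(
+    ...     0.0,                       # TIO locator s' (often zero, Note 1)
+    ...     1.754,                     # Earth rotation angle (rad)
+    ...     -0.5278, -1.2346, 2738.0,  # east longitude, latitude (rad), height (m)
+    ...     0.0, 0.0,                  # polar motion xp, yp (rad)
+    ...     2.0e-04, -2.4e-07)         # refraction constants A, B (rad)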
+ + """ + + #Turn all inputs into arrays + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + refa_in = numpy.array(refa, dtype=numpy.double, order="C", copy=False, subok=True) + refb_in = numpy.array(refb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), sp_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, refa_in, refb_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(refa_out, refa_in) + numpy.copyto(refb_out, refb_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [sp_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, refa_out, refb_out, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apio(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return refa_out, refb_out, astrom_out + + +def apio13(utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraApio13``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p i o 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between CIRS and observed + coordinates. The caller supplies UTC, site coordinates, ambient air + conditions and observing wavelength. + + Given: + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds) + elong double longitude (radians, east +ve, Note 3) + phi double geodetic latitude (radians, Note 3) + hm double height above ellipsoid (m, geodetic Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many applications, + xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto + the local meridian. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). 
Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to the + pressure and that an accurate phpa value is important for + precise work. + + 7) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 8) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 9) In cases where the caller wishes to supply his own Earth + rotation information and refraction constants, the function + eraApc can be used instead of the present function. + + 10) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 11) The context structure astrom produced by this function is used + by eraAtioq and eraAtoiq. + + Called: + eraUtctai UTC to TAI + eraTaitt TAI to TT + eraUtcut1 UTC to UT1 + eraSp00 the TIO locator s', IERS 2000 + eraEra00 Earth rotation angle, IAU 2000 + eraRefco refraction constants for given ambient conditions + eraApio astrometry parameters, CIRS-observed + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
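+
+    A minimal usage sketch (the numeric values are illustrative
+    placeholders, not validated test data; the wrapper broadcasts over
+    array inputs, so scalars and arrays may be mixed):
+
+        astrom = apio13(2456384.5, 0.969254,        # UTC as a 2-part quasi-JD
+                        0.155,                      # UT1-UTC (s)
+                        -0.5278, -1.2346, 2738.0,   # east longitude, latitude (rad), height (m)
+                        2.5e-7, 1.8e-6,             # polar motion xp, yp (rad)
+                        731.0, 12.8, 0.59, 0.55)    # pressure (hPa), temp (deg C), humidity, wavelength (um)
+
+    Any non-OK status from the underlying call is handled through
+    ``check_errwarn`` using ``STATUS_CODES['apio13']``.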
+ + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, astrom_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*12 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apio13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'apio13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out +STATUS_CODES['apio13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atci13(rc, dc, pr, pd, px, rv, date1, date2): + """ + Wrapper for ERFA function ``eraAtci13``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + date1 : double array + date2 : double array + + Returns + ------- + ri : double array + di : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i 1 3 + - - - - - - - - - - + + Transform ICRS star data, epoch J2000.0, to CIRS. + + Given: + rc double ICRS right ascension at J2000.0 (radians, Note 1) + dc double ICRS declination at J2000.0 (radians, Note 1) + pr double RA proper motion (radians/year; Note 2) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + date1 double TDB as a 2-part... 
+ date2 double ...Julian Date (Note 3) + + Returned: + ri,di double* CIRS geocentric RA,Dec (radians) + eo double* equation of the origins (ERA-GST, Note 5) + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 4) The available accuracy is better than 1 milliarcsecond, limited + mainly by the precession-nutation model that is used, namely + IAU 2000A/2006. Very close to solar system bodies, additional + errors of up to several milliarcseconds can occur because of + unmodeled light deflection; however, the Sun's contribution is + taken into account, to first order. The accuracy limitations of + the ERFA function eraEpv00 (used to compute Earth position and + velocity) can contribute aberration errors of up to + 5 microarcseconds. Light deflection at the Sun's limb is + uncertain at the 0.4 mas level. + + 5) Should the transformation to (equinox based) apparent place be + required rather than (CIO based) intermediate place, subtract the + equation of the origins from the returned right ascension: + RA = RI - EO. (The eraAnp function can then be applied, as + required, to keep the result in the conventional 0-2pi range.) + + Called: + eraApci13 astrometry parameters, ICRS-CIRS, 2013 + eraAtciq quick ICRS to CIRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
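+
+    A minimal usage sketch (catalog values are illustrative placeholders;
+    ``anp``, used to wrap the result as Note 5 describes, is assumed to be
+    the module-level wrapper of eraAnp):
+
+        ri, di, eo = atci13(2.71, 0.174,         # ICRS RA, Dec (rad)
+                            1e-8, 5e-9,          # proper motions (rad/yr)
+                            0.1, 25.0,           # parallax (arcsec), radial velocity (km/s)
+                            2456165.5, 0.401)    # TDB as a 2-part JD
+        ra_apparent = anp(ri - eo)               # equinox-based apparent RA (Note 5)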
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, date1_in, date2_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, date1_in, date2_in, ri_out, di_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atci13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return ri_out, di_out, eo_out + + +def atciq(rc, dc, pr, pd, px, rv, astrom): + """ + Wrapper for ERFA function ``eraAtciq``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t c i q + - - - - - - - - - + + Quick ICRS, epoch J2000.0, to CIRS transformation, given precomputed + star-independent astrometry parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + If the parallax and proper motions are zero the eraAtciqz function + can be used instead. 
+ + Given: + rc,dc double ICRS RA,Dec at J2000.0 (radians) + pr double RA proper motion (radians/year; Note 3) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + ri,di double CIRS RA,Dec (radians) + + Notes: + + 1) All the vectors are with respect to BCRS axes. + + 2) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 3) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + Called: + eraPmpx proper motion and parallax + eraLdsun light deflection by the Sun + eraAb stellar aberration + eraRxp product of r-matrix and pv-vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
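+
+    A minimal usage sketch for the "many stars, one date" pattern
+    (illustrative values; ``apci13`` is assumed to be the module-level
+    wrapper of eraApci13, returning the astrom structure and the equation
+    of the origins):
+
+        import numpy
+
+        astrom, eo = apci13(2456165.5, 0.401)       # one date
+        rc = numpy.array([2.71, 1.23, 0.05])        # ICRS RA (rad), many stars
+        dc = numpy.array([0.17, -0.92, 0.48])       # ICRS Dec (rad)
+        zeros = numpy.zeros_like(rc)
+        ri, di = atciq(rc, dc, zeros, zeros, zeros, zeros, astrom)
+
+    With zero parallax and proper motion, ``atciqz(rc, dc, astrom)`` is the
+    lighter-weight alternative noted above.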
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atciq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def atciqn(rc, dc, pr, pd, px, rv, astrom, n, b): + """ + Wrapper for ERFA function ``eraAtciqn``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + astrom : eraASTROM array + n : int array + b : eraLDBODY array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i q n + - - - - - - - - - - + + Quick ICRS, epoch J2000.0, to CIRS transformation, given precomputed + star-independent astrometry parameters plus a list of light- + deflecting bodies. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + + If the only light-deflecting body to be taken into account is the + Sun, the eraAtciq function can be used instead. If in addition the + parallax and proper motions are zero, the eraAtciqz function can be + used. 
+ + Given: + rc,dc double ICRS RA,Dec at J2000.0 (radians) + pr double RA proper motion (radians/year; Note 3) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + n int number of bodies (Note 3) + b eraLDBODY[n] data for each of the n bodies (Notes 3,4): + bm double mass of the body (solar masses, Note 5) + dl double deflection limiter (Note 6) + pv [2][3] barycentric PV of the body (au, au/day) + + Returned: + ri,di double CIRS RA,Dec (radians) + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The struct b contains n entries, one for each body to be + considered. If n = 0, no gravitational light deflection will be + applied, not even for the Sun. + + 4) The struct b should include an entry for the Sun as well as for + any planet or other body to be taken into account. The entries + should be in the order in which the light passes the body. + + 5) In the entry in the b struct for body i, the mass parameter + b[i].bm can, as required, be adjusted in order to allow for such + effects as quadrupole field. + + 6) The deflection limiter parameter b[i].dl is phi^2/2, where phi is + the angular separation (in radians) between star and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. Example values suitable for a terrestrial + observer, together with masses, are as follows: + + body i b[i].bm b[i].dl + + Sun 1.0 6e-6 + Jupiter 0.00095435 3e-9 + Saturn 0.00028574 3e-10 + + 7) For efficiency, validation of the contents of the b array is + omitted. The supplied masses must be greater than zero, the + position and velocity vectors must be right, and the deflection + limiter greater than zero. + + Called: + eraPmpx proper motion and parallax + eraLdn light deflection by n bodies + eraAb stellar aberration + eraRxp product of r-matrix and pv-vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
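+
+    A minimal usage sketch (illustrative values; the field names ``bm``,
+    ``dl`` and ``pv`` are assumed to be exposed by the module's
+    ``dt_eraLDBODY`` dtype, and ``apci13`` is assumed to return the astrom
+    structure plus the equation of the origins). The masses and deflection
+    limiters are the example values from Note 6; the PV entries are left
+    as zero placeholders that a real caller would fill from an ephemeris:
+
+        import numpy
+
+        b = numpy.zeros(3, dtype=dt_eraLDBODY)
+        b['bm'] = [1.0, 0.00095435, 0.00028574]     # Sun, Jupiter, Saturn (solar masses)
+        b['dl'] = [6e-6, 3e-9, 3e-10]               # deflection limiters
+        # b['pv'] = ...                             # barycentric PV of each body (au, au/day)
+        astrom, eo = apci13(2456165.5, 0.401)
+        ri, di = atciqn(2.71, 0.17, 0.0, 0.0, 0.0, 0.0, astrom, 3, b)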
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, n_in, b_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, n_in, b_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*9 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atciqn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def atciqz(rc, dc, astrom): + """ + Wrapper for ERFA function ``eraAtciqz``. + + Parameters + ---------- + rc : double array + dc : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i q z + - - - - - - - - - - + + Quick ICRS to CIRS transformation, given precomputed star- + independent astrometry parameters, and assuming zero parallax and + proper motion. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + The corresponding function for the case of non-zero parallax and + proper motion is eraAtciq. 
+ + Given: + rc,dc double ICRS astrometric RA,Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + ri,di double CIRS RA,Dec (radians) + + Note: + + All the vectors are with respect to BCRS axes. + + References: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013). + + Klioner, Sergei A., "A practical relativistic model for micro- + arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003). + + Called: + eraS2c spherical coordinates to unit vector + eraLdsun light deflection due to Sun + eraAb stellar aberration + eraRxp product of r-matrix and p-vector + eraC2s p-vector to spherical + eraAnp normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, astrom_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, astrom_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atciqz(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def atco13(rc, dc, pr, pd, px, rv, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtco13``. 
+ + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c o 1 3 + - - - - - - - - - - + + ICRS RA,Dec to observed place. The caller supplies UTC, site + coordinates, ambient air conditions and observing wavelength. + + ERFA models are used for the Earth ephemeris, bias-precession- + nutation, Earth orientation and refraction. + + Given: + rc,dc double ICRS right ascension at J2000.0 (radians, Note 1) + pr double RA proper motion (radians/year; Note 2) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 3-4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double latitude (geodetic, radians, Note 6) + hm double height above ellipsoid (m, geodetic, Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + eo double* equation of the origins (ERA-GST) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require + a preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. 
+ + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), + is available, an adequate estimate of hm can be obtained from + the expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtco13 and + eraAtoc13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 11) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 12) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApco13 astrometry parameters, ICRS-observed, 2013 + eraAtciq quick ICRS to CIRS + eraAtioq quick CIRS to observed + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
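+
+    A minimal usage sketch (all numeric values are illustrative
+    placeholders, not validated test data):
+
+        aob, zob, hob, dob, rob, eo = atco13(
+            2.71, 0.17,                     # ICRS RA, Dec (rad)
+            1e-8, 5e-9, 0.1, 25.0,          # proper motions (rad/yr), parallax (arcsec), RV (km/s)
+            2456384.5, 0.969254, 0.155,     # UTC as a 2-part quasi-JD, UT1-UTC (s)
+            -0.5278, -1.2346, 2738.0,       # east longitude, latitude (rad), height (m)
+            2.5e-7, 1.8e-6,                 # polar motion xp, yp (rad)
+            731.0, 12.8, 0.59, 0.55)        # pressure (hPa), temp (deg C), humidity, wavelength (um)
+
+    Any non-OK status is handled through ``check_errwarn`` using
+    ``STATUS_CODES['atco13']``.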
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, aob_out, zob_out, hob_out, dob_out, rob_out, eo_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*18 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atco13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atco13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = 
rob_out.reshape(rob_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out, eo_out +STATUS_CODES['atco13'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def atic13(ri, di, date1, date2): + """ + Wrapper for ERFA function ``eraAtic13``. + + Parameters + ---------- + ri : double array + di : double array + date1 : double array + date2 : double array + + Returns + ------- + rc : double array + dc : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t i c 1 3 + - - - - - - - - - - + + Transform star RA,Dec from geocentric CIRS to ICRS astrometric. + + Given: + ri,di double CIRS geocentric RA,Dec (radians) + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + eo double equation of the origins (ERA-GST, Note 4) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) Iterative techniques are used for the aberration and light + deflection corrections so that the functions eraAtic13 (or + eraAticq) and eraAtci13 (or eraAtciq) are accurate inverses; + even at the edge of the Sun's disk the discrepancy is only about + 1 nanoarcsecond. + + 3) The available accuracy is better than 1 milliarcsecond, limited + mainly by the precession-nutation model that is used, namely + IAU 2000A/2006. Very close to solar system bodies, additional + errors of up to several milliarcseconds can occur because of + unmodeled light deflection; however, the Sun's contribution is + taken into account, to first order. The accuracy limitations of + the ERFA function eraEpv00 (used to compute Earth position and + velocity) can contribute aberration errors of up to + 5 microarcseconds. Light deflection at the Sun's limb is + uncertain at the 0.4 mas level. + + 4) Should the transformation to (equinox based) J2000.0 mean place + be required rather than (CIO based) ICRS coordinates, subtract the + equation of the origins from the returned right ascension: + RA = RI - EO. (The eraAnp function can then be applied, as + required, to keep the result in the conventional 0-2pi range.) + + Called: + eraApci13 astrometry parameters, ICRS-CIRS, 2013 + eraAticq quick CIRS to ICRS astrometric + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
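+
+    A minimal usage sketch (illustrative values), showing the round trip
+    with ``atci13`` that Note 2 describes:
+
+        ri, di, eo = atci13(2.71, 0.17, 0.0, 0.0, 0.0, 0.0, 2456165.5, 0.401)
+        rc, dc, eo = atic13(ri, di, 2456165.5, 0.401)
+        # rc, dc recover the original ICRS place to about 1 nanoarcsecond (Note 2)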
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, date1_in, date2_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, date1_in, date2_in, rc_out, dc_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atic13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return rc_out, dc_out, eo_out + + +def aticq(ri, di, astrom): + """ + Wrapper for ERFA function ``eraAticq``. + + Parameters + ---------- + ri : double array + di : double array + astrom : eraASTROM array + + Returns + ------- + rc : double array + dc : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t i c q + - - - - - - - - - + + Quick CIRS RA,Dec to ICRS astrometric place, given the star- + independent astrometry parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling one of the functions eraApci[13], eraApcg[13], eraApco[13] + or eraApcs[13]. + + Given: + ri,di double CIRS RA,Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + + Notes: + + 1) Only the Sun is taken into account in the light deflection + correction. 
+
+ 2) Iterative techniques are used for the aberration and light
+ deflection corrections so that the functions eraAtic13 (or
+ eraAticq) and eraAtci13 (or eraAtciq) are accurate inverses;
+ even at the edge of the Sun's disk the discrepancy is only about
+ 1 nanoarcsecond.
+
+ Called:
+ eraS2c spherical coordinates to unit vector
+ eraTrxp product of transpose of r-matrix and p-vector
+ eraZp zero p-vector
+ eraAb stellar aberration
+ eraLdsun light deflection by the Sun
+ eraC2s p-vector to spherical
+ eraAnp normalize angle into range +/- pi
+
+ Copyright (C) 2013-2017, NumFOCUS Foundation.
+ Derived, with permission, from the SOFA library. See notes at end of file.
+
+ """
+
+ #Turn all inputs into arrays
+ ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True)
+ di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True)
+ astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True)
+ make_outputs_scalar = False
+
+ #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+ broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in)
+ rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+ dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+
+ #Create the iterator, broadcasting on all but the consumed dimensions
+ arrs = [ri_in, di_in, astrom_in, rc_out, dc_out]
+ op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+ op_flags = [['readonly']]*3 + [['readwrite']]*2
+ it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+ #Iterate
+ stat_ok = _core._aticq(it)
+ #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+ if make_outputs_scalar:
+ assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1
+ rc_out = rc_out.reshape(rc_out.shape[1:])
+ assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1
+ dc_out = dc_out.reshape(dc_out.shape[1:])
+
+ return rc_out, dc_out
+
+
+def aticqn(ri, di, astrom, n, b):
+ """
+ Wrapper for ERFA function ``eraAticqn``.
+
+ Parameters
+ ----------
+ ri : double array
+ di : double array
+ astrom : eraASTROM array
+ n : int array
+ b : eraLDBODY array
+
+ Returns
+ -------
+ rc : double array
+ dc : double array
+
+ Notes
+ -----
+ The ERFA documentation is below.
+
+ - - - - - - - - -
+ e r a A t i c q n
+ - - - - - - - - -
+
+ Quick CIRS to ICRS astrometric place transformation, given the star-
+ independent astrometry parameters plus a list of light-deflecting
+ bodies.
+
+ Use of this function is appropriate when efficiency is important and
+ where many star positions are all to be transformed for one date.
+ The star-independent astrometry parameters can be obtained by
+ calling one of the functions eraApci[13], eraApcg[13], eraApco[13]
+ or eraApcs[13].
+
+ If the only light-deflecting body to be taken into account is the
+ Sun, the eraAticq function can be used instead.
+ + Given: + ri,di double CIRS RA,Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + n int number of bodies (Note 3) + b eraLDBODY[n] data for each of the n bodies (Notes 3,4): + bm double mass of the body (solar masses, Note 5) + dl double deflection limiter (Note 6) + pv [2][3] barycentric PV of the body (au, au/day) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + + Notes: + + 1) Iterative techniques are used for the aberration and light + deflection corrections so that the functions eraAticqn and + eraAtciqn are accurate inverses; even at the edge of the Sun's + disk the discrepancy is only about 1 nanoarcsecond. + + 2) If the only light-deflecting body to be taken into account is the + Sun, the eraAticq function can be used instead. + + 3) The struct b contains n entries, one for each body to be + considered. If n = 0, no gravitational light deflection will be + applied, not even for the Sun. + + 4) The struct b should include an entry for the Sun as well as for + any planet or other body to be taken into account. The entries + should be in the order in which the light passes the body. + + 5) In the entry in the b struct for body i, the mass parameter + b[i].bm can, as required, be adjusted in order to allow for such + effects as quadrupole field. + + 6) The deflection limiter parameter b[i].dl is phi^2/2, where phi is + the angular separation (in radians) between star and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. Example values suitable for a terrestrial + observer, together with masses, are as follows: + + body i b[i].bm b[i].dl + + Sun 1.0 6e-6 + Jupiter 0.00095435 3e-9 + Saturn 0.00028574 3e-10 + + 7) For efficiency, validation of the contents of the b array is + omitted. The supplied masses must be greater than zero, the + position and velocity vectors must be right, and the deflection + limiter greater than zero. + + Called: + eraS2c spherical coordinates to unit vector + eraTrxp product of transpose of r-matrix and p-vector + eraZp zero p-vector + eraAb stellar aberration + eraLdn light deflection by n bodies + eraC2s p-vector to spherical + eraAnp normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
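+
+    A minimal usage sketch (illustrative values; as for eraAtciqn, the
+    field names ``bm``, ``dl`` and ``pv`` are assumed to be exposed by the
+    module's ``dt_eraLDBODY`` dtype, and ``apci13`` is assumed to return
+    the astrom structure plus the equation of the origins). The body data
+    are the Note 6 example values, with the PV entries left as zero
+    placeholders to be filled from an ephemeris:
+
+        import numpy
+
+        b = numpy.zeros(3, dtype=dt_eraLDBODY)
+        b['bm'] = [1.0, 0.00095435, 0.00028574]     # Sun, Jupiter, Saturn (solar masses)
+        b['dl'] = [6e-6, 3e-9, 3e-10]               # deflection limiters
+        astrom, eo = apci13(2456165.5, 0.401)
+        rc, dc = aticqn(2.71, 0.17, astrom, 3, b)   # CIRS RA,Dec (rad) back to ICRS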
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in, n_in, b_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, astrom_in, n_in, b_in, rc_out, dc_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aticqn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + + return rc_out, dc_out + + +def atio13(ri, di, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtio13``. + + Parameters + ---------- + ri : double array + di : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t i o 1 3 + - - - - - - - - - - + + CIRS RA,Dec to observed place. The caller supplies UTC, site + coordinates, ambient air conditions and observing wavelength. + + Given: + ri double CIRS right ascension (CIO-based, radians) + di double CIRS declination (radians) + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds, Note 3) + elong double longitude (radians, east +ve, Note 4) + phi double geodetic latitude (radians, Note 4) + hm double height above ellipsoid (m, geodetic Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 7) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). 
+ + 8) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 9) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + 10) The complementary functions eraAtio13 and eraAtoi13 are self- + consistent to better than 1 microarcsecond all over the + celestial sphere. + + 11) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApio13 astrometry parameters, CIRS-observed, 2013 + eraAtioq quick CIRS to observed + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + 
c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, aob_out, zob_out, hob_out, dob_out, rob_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*14 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atio13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atio13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = rob_out.reshape(rob_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out +STATUS_CODES['atio13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atioq(ri, di, astrom): + """ + Wrapper for ERFA function ``eraAtioq``. + + Parameters + ---------- + ri : double array + di : double array + astrom : eraASTROM array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t i o q + - - - - - - - - - + + Quick CIRS to observed place transformation. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling eraApio[13] or eraApco[13]. + + Given: + ri double CIRS right ascension + di double CIRS declination + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + + Notes: + + 1) This function returns zenith distance rather than altitude in + order to reflect the fact that no allowance is made for + depression of the horizon. 
+ + 2) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtioq and + eraAtoiq are self-consistent to better than 1 microarcsecond all + over the celestial sphere. With refraction included, consistency + falls off at high zenith distances, but is still better than + 0.05 arcsec at 85 degrees. + + 3) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 4) The CIRS RA,Dec is obtained from a star catalog mean place by + allowing for space motion, parallax, the Sun's gravitational lens + effect, annual aberration and precession-nutation. For star + positions in the ICRS, these effects can be applied by means of + the eraAtci13 (etc.) functions. Starting from classical "mean + place" systems, additional transformations will be needed first. + + 5) "Observed" Az,El means the position that would be seen by a + perfect geodetically aligned theodolite. This is obtained from + the CIRS RA,Dec by allowing for Earth orientation and diurnal + aberration, rotating from equator to horizon coordinates, and + then adjusting for refraction. The HA,Dec is obtained by + rotating back into equatorial coordinates, and is the position + that would be seen by a perfect equatorial with its polar axis + aligned to the Earth's axis of rotation. Finally, the RA is + obtained by subtracting the HA from the local ERA. + + 6) The star-independent CIRS-to-observed-place parameters in ASTROM + may be computed with eraApio[13] or eraApco[13]. If nothing has + changed significantly except the time, eraAper[13] may be used to + perform the requisite adjustment to the astrom structure. + + Called: + eraS2c spherical coordinates to unit vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
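+
+    Illustrative usage (a sketch only): the ``astrom`` parameter set is
+    assumed to come from the companion wrapper ``apio13`` defined
+    elsewhere in this module, and all numeric values are arbitrary but
+    plausible for a single site and date.
+
+        import numpy
+        # star-independent CIRS-to-observed parameters for one site/date
+        astrom = apio13(2456384.5, 0.969254051, 0.1550675,
+                        -0.527800806, -1.2345856, 2738.0,
+                        2.47230737e-7, 1.82640464e-6,
+                        731.0, 12.8, 0.59, 0.55)
+        # transform many CIRS positions for that one date
+        ri = numpy.linspace(0.0, 2.0 * numpy.pi, 5)
+        di = numpy.zeros(5)
+        aob, zob, hob, dob, rob = atioq(ri, di, astrom)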
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, astrom_in, aob_out, zob_out, hob_out, dob_out, rob_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atioq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = rob_out.reshape(rob_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out + + +def atoc13(type, ob1, ob2, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtoc13``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + rc : double array + dc : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t o c 1 3 + - - - - - - - - - - + + Observed place at a groundbased site to to ICRS astrometric RA,Dec. + The caller supplies UTC, site coordinates, ambient air conditions + and observing wavelength. + + Given: + type char[] type of coordinates - "R", "H" or "A" (Notes 1,2) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 3,4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double geodetic latitude (radians, Note 6) + hm double height above ellipsoid (m, geodetic Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth + (north zero, east 90 deg) and zenith distance. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. 
+ + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtco13 and + eraAtoc13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 11) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApco13 astrometry parameters, ICRS-observed + eraAtoiq quick observed to CIRS + eraAticq quick CIRS to ICRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
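+
+    A minimal usage sketch (all values are arbitrary; "A" means that
+    ob1, ob2 are azimuth and zenith distance, as described in Note 2):
+
+        rc, dc = atoc13("A", 2.1, 1.1,
+                        2456384.5, 0.969254051, 0.1550675,
+                        -0.527800806, -1.2345856, 2738.0,
+                        2.47230737e-7, 1.82640464e-6,
+                        731.0, 12.8, 0.59, 0.55)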
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, rc_out, dc_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*15 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoc13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atoc13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + + return rc_out, dc_out +STATUS_CODES['atoc13'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def atoi13(type, ob1, ob2, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtoi13``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t o i 1 3 + - - - - - - - - - - + + Observed place to CIRS. 
The caller supplies UTC, site coordinates, + ambient air conditions and observing wavelength. + + Given: + type char[] type of coordinates - "R", "H" or "A" (Notes 1,2) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 3,4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double geodetic latitude (radians, Note 6) + hm double height above the ellipsoid (meters, Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + ri double* CIRS right ascension (CIO-based, radians) + di double* CIRS declination (radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth + (north zero, east 90 deg) and zenith distance. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. 
The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + 11) Without refraction, the complementary functions eraAtio13 and + eraAtoi13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 12) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApio13 astrometry parameters, CIRS-observed, 2013 + eraAtoiq quick observed to CIRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
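+
+    The height/pressure estimates of Note 8 translate directly into
+    Python; a quick sketch (283 K is an assumed sea-level air
+    temperature, the height is arbitrary):
+
+        import math
+        tsl = 283.0                                        # sea-level air temperature, K
+        hm = 2738.0                                        # height above the ellipsoid, m
+        phpa = 1013.25 * math.exp(-hm / (29.3 * tsl))      # roughly 730 hPa
+        hm_back = -29.3 * tsl * math.log(phpa / 1013.25)   # recovers hm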
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, ri_out, di_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*15 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoi13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atoi13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out +STATUS_CODES['atoi13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atoiq(type, ob1, ob2, astrom): + """ + Wrapper for ERFA function ``eraAtoiq``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t o i q + - - - - - - - - - + + Quick observed place to CIRS, given the star-independent astrometry + parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling eraApio[13] or eraApco[13]. 
+ + Given: + type char[] type of coordinates: "R", "H" or "A" (Note 1) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + ri double* CIRS right ascension (CIO-based, radians) + di double* CIRS declination (radians) + + Notes: + + 1) "Observed" Az,El means the position that would be seen by a + perfect geodetically aligned theodolite. This is related to + the observed HA,Dec via the standard rotation, using the geodetic + latitude (corrected for polar motion), while the observed HA and + RA are related simply through the Earth rotation angle and the + site longitude. "Observed" RA,Dec or HA,Dec thus means the + position that would be seen by a perfect equatorial with its + polar axis aligned to the Earth's axis of rotation. By removing + from the observed place the effects of atmospheric refraction and + diurnal aberration, the CIRS RA,Dec is obtained. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth (north + zero, east 90 deg) and zenith distance. (Zenith distance is used + rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) + + 3) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better than + 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtioq and + eraAtoiq are self-consistent to better than 1 microarcsecond all + over the celestial sphere. With refraction included, consistency + falls off at high zenith distances, but is still better than + 0.05 arcsec at 85 degrees. + + 4) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraS2c spherical coordinates to unit vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
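+
+    A short sketch of the round trip discussed in Note 3, reusing the
+    ``astrom``, ``ri`` and ``di`` values from the eraAtioq example
+    earlier in this file (an assumption; any star-independent
+    CIRS-to-observed parameter set will do):
+
+        aob, zob, hob, dob, rob = atioq(ri, di, astrom)
+        ri2, di2 = atoiq("A", aob, zob, astrom)
+        # ri2, di2 reproduce ri, di to the tolerances quoted in Note 3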
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, astrom_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, astrom_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoiq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def ld(bm, p, q, e, em, dlim): + """ + Wrapper for ERFA function ``eraLd``. + + Parameters + ---------- + bm : double array + p : double array + q : double array + e : double array + em : double array + dlim : double array + + Returns + ------- + p1 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - + e r a L d + - - - - - - + + Apply light deflection by a solar-system body, as part of + transforming coordinate direction into natural direction. + + Given: + bm double mass of the gravitating body (solar masses) + p double[3] direction from observer to source (unit vector) + q double[3] direction from body to source (unit vector) + e double[3] direction from body to observer (unit vector) + em double distance from body to observer (au) + dlim double deflection limiter (Note 4) + + Returned: + p1 double[3] observer to deflected source (unit vector) + + Notes: + + 1) The algorithm is based on Expr. (70) in Klioner (2003) and + Expr. (7.63) in the Explanatory Supplement (Urban & Seidelmann + 2013), with some rearrangement to minimize the effects of machine + precision. + + 2) The mass parameter bm can, as required, be adjusted in order to + allow for such effects as quadrupole field. + + 3) The barycentric position of the deflecting body should ideally + correspond to the time of closest approach of the light ray to + the body. + + 4) The deflection limiter parameter dlim is phi^2/2, where phi is + the angular separation (in radians) between source and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. + + 5) The returned vector p1 is not normalized, but the consequential + departure from unit magnitude is always negligible. + + 6) The arguments p and p1 can be the same array. + + 7) To accumulate total light deflection taking into account the + contributions from several bodies, call the present function for + each body in succession, in decreasing order of distance from the + observer. 
+ + 8) For efficiency, validation is omitted. The supplied vectors must + be of unit magnitude, and the deflection limiter non-zero and + positive. + + References: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013). + + Klioner, Sergei A., "A practical relativistic model for micro- + arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003). + + Called: + eraPdp scalar product of two p-vectors + eraPxp vector product of two p-vectors + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + bm_in = numpy.array(bm, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + q_in = numpy.array(q, dtype=numpy.double, order="C", copy=False, subok=True) + e_in = numpy.array(e, dtype=numpy.double, order="C", copy=False, subok=True) + em_in = numpy.array(em, dtype=numpy.double, order="C", copy=False, subok=True) + dlim_in = numpy.array(dlim, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + check_trailing_shape(q_in, (3,), "q") + check_trailing_shape(e_in, (3,), "e") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), bm_in, p_in[...,0], q_in[...,0], e_in[...,0], em_in, dlim_in) + p1_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [bm_in, p_in[...,0], q_in[...,0], e_in[...,0], em_in, dlim_in, p1_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ld(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p1_out.shape) > 0 and p1_out.shape[0] == 1 + p1_out = p1_out.reshape(p1_out.shape[1:]) + + return p1_out + + +def ldn(n, b, ob, sc): + """ + Wrapper for ERFA function ``eraLdn``. + + Parameters + ---------- + n : int array + b : eraLDBODY array + ob : double array + sc : double array + + Returns + ------- + sn : double array + + Notes + ----- + The ERFA documentation is below. + +/*+ + - - - - - - - + e r a L d n + - - - - - - - + + For a star, apply light deflection by multiple solar-system bodies, + as part of transforming coordinate direction into natural direction. + + Given: + n int number of bodies (note 1) + b eraLDBODY[n] data for each of the n bodies (Notes 1,2): + bm double mass of the body (solar masses, Note 3) + dl double deflection limiter (Note 4) + pv [2][3] barycentric PV of the body (au, au/day) + ob double[3] barycentric position of the observer (au) + sc double[3] observer to star coord direction (unit vector) + + Returned: + sn double[3] observer to deflected star (unit vector) + + 1) The array b contains n entries, one for each body to be + considered. If n = 0, no gravitational light deflection will be + applied, not even for the Sun. + + 2) The array b should include an entry for the Sun as well as for + any planet or other body to be taken into account. The entries + should be in the order in which the light passes the body. 
+ + 3) In the entry in the b array for body i, the mass parameter + b[i].bm can, as required, be adjusted in order to allow for such + effects as quadrupole field. + + 4) The deflection limiter parameter b[i].dl is phi^2/2, where phi is + the angular separation (in radians) between star and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. Example values suitable for a terrestrial + observer, together with masses, are as follows: + + body i b[i].bm b[i].dl + + Sun 1.0 6e-6 + Jupiter 0.00095435 3e-9 + Saturn 0.00028574 3e-10 + + 5) For cases where the starlight passes the body before reaching the + observer, the body is placed back along its barycentric track by + the light time from that point to the observer. For cases where + the body is "behind" the observer no such shift is applied. If + a different treatment is preferred, the user has the option of + instead using the eraLd function. Similarly, eraLd can be used + for cases where the source is nearby, not a star. + + 6) The returned vector sn is not normalized, but the consequential + departure from unit magnitude is always negligible. + + 7) The arguments sc and sn can be the same array. + + 8) For efficiency, validation is omitted. The supplied masses must + be greater than zero, the position and velocity vectors must be + right, and the deflection limiter greater than zero. + + Reference: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.2.4. + + Called: + eraCp copy p-vector + eraPdp scalar product of two p-vectors + eraPmp p-vector minus p-vector + eraPpsp p-vector plus scaled p-vector + eraPn decompose p-vector into modulus and direction + eraLd light deflection by a solar-system body + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + ob_in = numpy.array(ob, dtype=numpy.double, order="C", copy=False, subok=True) + sc_in = numpy.array(sc, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ob_in, (3,), "ob") + check_trailing_shape(sc_in, (3,), "sc") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, b_in, ob_in[...,0], sc_in[...,0]) + sn_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, b_in, ob_in[...,0], sc_in[...,0], sn_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ldn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sn_out.shape) > 0 and sn_out.shape[0] == 1 + sn_out = sn_out.reshape(sn_out.shape[1:]) + + return sn_out + + +def ldsun(p, e, em): + """ + Wrapper for ERFA function ``eraLdsun``. 
+ + Parameters + ---------- + p : double array + e : double array + em : double array + + Returns + ------- + p1 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a L d s u n + - - - - - - - - - + + Deflection of starlight by the Sun. + + Given: + p double[3] direction from observer to star (unit vector) + e double[3] direction from Sun to observer (unit vector) + em double distance from Sun to observer (au) + + Returned: + p1 double[3] observer to deflected star (unit vector) + + Notes: + + 1) The source is presumed to be sufficiently distant that its + directions seen from the Sun and the observer are essentially + the same. + + 2) The deflection is restrained when the angle between the star and + the center of the Sun is less than a threshold value, falling to + zero deflection for zero separation. The chosen threshold value + is within the solar limb for all solar-system applications, and + is about 5 arcminutes for the case of a terrestrial observer. + + 3) The arguments p and p1 can be the same array. + + Called: + eraLd light deflection by a solar-system body + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + e_in = numpy.array(e, dtype=numpy.double, order="C", copy=False, subok=True) + em_in = numpy.array(em, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + check_trailing_shape(e_in, (3,), "e") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0], e_in[...,0], em_in) + p1_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], e_in[...,0], em_in, p1_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ldsun(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p1_out.shape) > 0 and p1_out.shape[0] == 1 + p1_out = p1_out.reshape(p1_out.shape[1:]) + + return p1_out + + +def pmpx(rc, dc, pr, pd, px, rv, pmt, pob): + """ + Wrapper for ERFA function ``eraPmpx``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + pmt : double array + pob : double array + + Returns + ------- + pco : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P m p x + - - - - - - - - + + Proper motion and parallax. + + Given: + rc,dc double ICRS RA,Dec at catalog epoch (radians) + pr double RA proper motion (radians/year; Note 1) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + pmt double proper motion time interval (SSB, Julian years) + pob double[3] SSB to observer vector (au) + + Returned: + pco double[3] coordinate direction (BCRS unit vector) + + Notes: + + 1) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. 
+ + 2) The proper motion time interval is for when the starlight + reaches the solar system barycenter. + + 3) To avoid the need for iteration, the Roemer effect (i.e. the + small annual modulation of the proper motion coming from the + changing light time) is applied approximately, using the + direction of the star at the catalog epoch. + + References: + + 1984 Astronomical Almanac, pp B39-B41. + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.2. + + Called: + eraPdp scalar product of two p-vectors + eraPn decompose p-vector into modulus and direction + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + pmt_in = numpy.array(pmt, dtype=numpy.double, order="C", copy=False, subok=True) + pob_in = numpy.array(pob, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pob_in, (3,), "pob") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, pmt_in, pob_in[...,0]) + pco_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, pmt_in, pob_in[...,0], pco_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmpx(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pco_out.shape) > 0 and pco_out.shape[0] == 1 + pco_out = pco_out.reshape(pco_out.shape[1:]) + + return pco_out + + +def pmsafe(ra1, dec1, pmr1, pmd1, px1, rv1, ep1a, ep1b, ep2a, ep2b): + """ + Wrapper for ERFA function ``eraPmsafe``. + + Parameters + ---------- + ra1 : double array + dec1 : double array + pmr1 : double array + pmd1 : double array + px1 : double array + rv1 : double array + ep1a : double array + ep1b : double array + ep2a : double array + ep2b : double array + + Returns + ------- + ra2 : double array + dec2 : double array + pmr2 : double array + pmd2 : double array + px2 : double array + rv2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m s a f e + - - - - - - - - - - + + Star proper motion: update star catalog data for space motion, with + special handling to handle the zero parallax case. 
+ + Given: + ra1 double right ascension (radians), before + dec1 double declination (radians), before + pmr1 double RA proper motion (radians/year), before + pmd1 double Dec proper motion (radians/year), before + px1 double parallax (arcseconds), before + rv1 double radial velocity (km/s, +ve = receding), before + ep1a double "before" epoch, part A (Note 1) + ep1b double "before" epoch, part B (Note 1) + ep2a double "after" epoch, part A (Note 1) + ep2b double "after" epoch, part B (Note 1) + + Returned: + ra2 double right ascension (radians), after + dec2 double declination (radians), after + pmr2 double RA proper motion (radians/year), after + pmd2 double Dec proper motion (radians/year), after + px2 double parallax (arcseconds), after + rv2 double radial velocity (km/s, +ve = receding), after + + Returned (function value): + int status: + -1 = system error (should not occur) + 0 = no warnings or errors + 1 = distance overridden (Note 6) + 2 = excessive velocity (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above warnings + + Notes: + + 1) The starting and ending TDB epochs ep1a+ep1b and ep2a+ep2b are + Julian Dates, apportioned in any convenient way between the two + parts (A and B). For example, JD(TDB)=2450123.7 could be + expressed in any of these ways, among others: + + epNa epNb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. + + 2) In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per TDB + Julian year. + + The parallax and radial velocity are in the same frame. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, is + assumed. + + 6) An extremely small (or zero or negative) parallax is overridden + to ensure that the object is at a finite but very large distance, + but not so large that the proper motion is equivalent to a large + but safe speed (about 0.1c using the chosen constant). A warning + status of 1 is added to the status if this action has been taken. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX in the function eraStarpv), it is arbitrarily set + to zero. When this action occurs, 2 is added to the status. + + 8) The relativistic adjustment carried out in the eraStarpv function + involves an iterative calculation. If the process fails to + converge within a set number of iterations, 4 is added to the + status. 
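+
+    For example (a sketch; the catalog data are arbitrary and the two
+    epochs are given by the JD method of Note 1):
+
+        ra2, dec2, pmr2, pmd2, px2, rv2 = pmsafe(
+            1.234, -0.987, 1.0e-6, -2.0e-6, 0.05, 10.0,
+            2451545.0, 0.0,        # "before" epoch, J2000.0
+            2457206.375, 0.0)      # "after" epoch, J2015.5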
+ + Called: + eraSeps angle between two points + eraStarpm update star catalog data for space motion + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ra1_in = numpy.array(ra1, dtype=numpy.double, order="C", copy=False, subok=True) + dec1_in = numpy.array(dec1, dtype=numpy.double, order="C", copy=False, subok=True) + pmr1_in = numpy.array(pmr1, dtype=numpy.double, order="C", copy=False, subok=True) + pmd1_in = numpy.array(pmd1, dtype=numpy.double, order="C", copy=False, subok=True) + px1_in = numpy.array(px1, dtype=numpy.double, order="C", copy=False, subok=True) + rv1_in = numpy.array(rv1, dtype=numpy.double, order="C", copy=False, subok=True) + ep1a_in = numpy.array(ep1a, dtype=numpy.double, order="C", copy=False, subok=True) + ep1b_in = numpy.array(ep1b, dtype=numpy.double, order="C", copy=False, subok=True) + ep2a_in = numpy.array(ep2a, dtype=numpy.double, order="C", copy=False, subok=True) + ep2b_in = numpy.array(ep2b, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in) + ra2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in, ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*10 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmsafe(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'pmsafe') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra2_out.shape) > 0 and ra2_out.shape[0] == 1 + ra2_out = ra2_out.reshape(ra2_out.shape[1:]) + assert len(dec2_out.shape) > 0 and dec2_out.shape[0] == 1 + dec2_out = dec2_out.reshape(dec2_out.shape[1:]) + assert len(pmr2_out.shape) > 0 and pmr2_out.shape[0] == 1 + pmr2_out = pmr2_out.reshape(pmr2_out.shape[1:]) + assert len(pmd2_out.shape) > 0 and pmd2_out.shape[0] == 1 + pmd2_out = pmd2_out.reshape(pmd2_out.shape[1:]) + assert len(px2_out.shape) > 0 and px2_out.shape[0] == 1 + px2_out = px2_out.reshape(px2_out.shape[1:]) + assert len(rv2_out.shape) > 0 and rv2_out.shape[0] == 1 + rv2_out = rv2_out.reshape(rv2_out.shape[1:]) + + return ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out +STATUS_CODES['pmsafe'] = {-1: 'system error (should not occur)', 0: 'no warnings or errors', 1: 'distance overridden (Note 6)', 2: 'excessive velocity (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 'binary logical OR of the above warnings'} + + + +def pvtob(elong, phi, hm, xp, yp, sp, theta): + 
""" + Wrapper for ERFA function ``eraPvtob``. + + Parameters + ---------- + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + sp : double array + theta : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P v t o b + - - - - - - - - - + + Position and velocity of a terrestrial observing station. + + Given: + elong double longitude (radians, east +ve, Note 1) + phi double latitude (geodetic, radians, Note 1) + hm double height above ref. ellipsoid (geodetic, m) + xp,yp double coordinates of the pole (radians, Note 2) + sp double the TIO locator s' (radians, Note 2) + theta double Earth rotation angle (radians, Note 3) + + Returned: + pv double[2][3] position/velocity vector (m, m/s, CIRS) + + Notes: + + 1) The terrestrial coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. + + 2) xp and yp are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions), measured along the + meridians 0 and 90 deg west respectively. sp is the TIO locator + s', in radians, which positions the Terrestrial Intermediate + Origin on the equator. For many applications, xp, yp and + (especially) sp can be set to zero. + + 3) If theta is Greenwich apparent sidereal time instead of Earth + rotation angle, the result is with respect to the true equator + and equinox of date, i.e. with the x-axis at the equinox rather + than the celestial intermediate origin. + + 4) The velocity units are meters per UT1 second, not per SI second. + This is unlikely to have any practical consequences in the modern + era. + + 5) No validation is performed on the arguments. Error cases that + could lead to arithmetic exceptions are trapped by the eraGd2gc + function, and the result set to zeros. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.4.3.3. + + Called: + eraGd2gc geodetic to geocentric transformation + eraPom00 polar motion matrix + eraTrxp product of transpose of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, theta_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, theta_in, pv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pvtob(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out + + +def refco(phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraRefco``. + + Parameters + ---------- + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + refa : double array + refb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a R e f c o + - - - - - - - - - + + Determine the constants A and B in the atmospheric refraction model + dZ = A tan Z + B tan^3 Z. + + Z is the "observed" zenith distance (i.e. affected by refraction) + and dZ is what to add to Z to give the "topocentric" (i.e. in vacuo) + zenith distance. + + Given: + phpa double pressure at the observer (hPa = millibar) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers) + + Returned: + refa double* tan Z coefficient (radians) + refb double* tan^3 Z coefficient (radians) + + Notes: + + 1) The model balances speed and accuracy to give good results in + applications where performance at low altitudes is not paramount. + Performance is maintained across a range of conditions, and + applies to both optical/IR and radio. + + 2) The model omits the effects of (i) height above sea level (apart + from the reduced pressure itself), (ii) latitude (i.e. the + flattening of the Earth), (iii) variations in tropospheric lapse + rate and (iv) dispersive effects in the radio. + + The model was tested using the following range of conditions: + + lapse rates 0.0055, 0.0065, 0.0075 deg/meter + latitudes 0, 25, 50, 75 degrees + heights 0, 2500, 5000 meters ASL + pressures mean for height -10% to +5% in steps of 5% + temperatures -10 deg to +20 deg with respect to 280 deg at SL + relative humidity 0, 0.5, 1 + wavelengths 0.4, 0.6, ... 
2 micron, + radio + zenith distances 15, 45, 75 degrees + + The accuracy with respect to raytracing through a model + atmosphere was as follows: + + worst RMS + + optical/IR 62 mas 8 mas + radio 319 mas 49 mas + + For this particular set of conditions: + + lapse rate 0.0065 K/meter + latitude 50 degrees + sea level + pressure 1005 mb + temperature 280.15 K + humidity 80% + wavelength 5740 Angstroms + + the results were as follows: + + ZD raytrace eraRefco Saastamoinen + + 10 10.27 10.27 10.27 + 20 21.19 21.20 21.19 + 30 33.61 33.61 33.60 + 40 48.82 48.83 48.81 + 45 58.16 58.18 58.16 + 50 69.28 69.30 69.27 + 55 82.97 82.99 82.95 + 60 100.51 100.54 100.50 + 65 124.23 124.26 124.20 + 70 158.63 158.68 158.61 + 72 177.32 177.37 177.31 + 74 200.35 200.38 200.32 + 76 229.45 229.43 229.42 + 78 267.44 267.29 267.41 + 80 319.13 318.55 319.10 + + deg arcsec arcsec arcsec + + The values for Saastamoinen's formula (which includes terms + up to tan^5) are taken from Hohenkerk and Sinclair (1985). + + 3) A wl value in the range 0-100 selects the optical/IR case and is + wavelength in micrometers. Any value outside this range selects + the radio case. + + 4) Outlandish input parameters are silently limited to + mathematically safe values. Zero pressure is permissible, and + causes zeroes to be returned. + + 5) The algorithm draws on several sources, as follows: + + a) The formula for the saturation vapour pressure of water as + a function of temperature and temperature is taken from + Equations (A4.5-A4.7) of Gill (1982). + + b) The formula for the water vapour pressure, given the + saturation pressure and the relative humidity, is from + Crane (1976), Equation (2.5.5). + + c) The refractivity of air is a function of temperature, + total pressure, water-vapour pressure and, in the case + of optical/IR, wavelength. The formulae for the two cases are + developed from Hohenkerk & Sinclair (1985) and Rueger (2002). + + d) The formula for beta, the ratio of the scale height of the + atmosphere to the geocentric distance of the observer, is + an adaption of Equation (9) from Stone (1996). The + adaptations, arrived at empirically, consist of (i) a small + adjustment to the coefficient and (ii) a humidity term for the + radio case only. + + e) The formulae for the refraction constants as a function of + n-1 and beta are from Green (1987), Equation (4.31). + + References: + + Crane, R.K., Meeks, M.L. (ed), "Refraction Effects in the Neutral + Atmosphere", Methods of Experimental Physics: Astrophysics 12B, + Academic Press, 1976. + + Gill, Adrian E., "Atmosphere-Ocean Dynamics", Academic Press, + 1982. + + Green, R.M., "Spherical Astronomy", Cambridge University Press, + 1987. + + Hohenkerk, C.Y., & Sinclair, A.T., NAO Technical Note No. 63, + 1985. + + Rueger, J.M., "Refractive Index Formulae for Electronic Distance + Measurement with Radio and Millimetre Waves", in Unisurv Report + S-68, School of Surveying and Spatial Information Systems, + University of New South Wales, Sydney, Australia, 2002. + + Stone, Ronald C., P.A.S.P. 108, 1051-1058, 1996. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
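+
+ Illustrative usage of this wrapper (not part of the ERFA documentation;
+ the observing conditions below are arbitrary and only show the call):
+
+ >>> refa, refb = refco(1005.0, 7.0, 0.8, 0.574)  # hPa, deg C, humidity 0-1, micrometers
+ >>> # the refraction is then dZ = refa*tan(Z) + refb*tan(Z)**3 for observed zenith distance Z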
+ + """ + + #Turn all inputs into arrays + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), phpa_in, tc_in, rh_in, wl_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [phpa_in, tc_in, rh_in, wl_in, refa_out, refb_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._refco(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + + return refa_out, refb_out + + +def epv00(date1, date2): + """ + Wrapper for ERFA function ``eraEpv00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + pvh : double array + pvb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E p v 0 0 + - - - - - - - - - + + Earth position and velocity, heliocentric and barycentric, with + respect to the Barycentric Celestial Reference System. + + Given: + date1,date2 double TDB date (Note 1) + + Returned: + pvh double[2][3] heliocentric Earth position/velocity + pvb double[2][3] barycentric Earth position/velocity + + Returned (function value): + int status: 0 = OK + +1 = warning: date outside + the range 1900-2100 AD + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. However, + the accuracy of the result is more likely to be limited by the + algorithm itself than the way the date has been expressed. + + n.b. TT can be used instead of TDB in most applications. 
+ + 2) On return, the arrays pvh and pvb contain the following: + + pvh[0][0] x } + pvh[0][1] y } heliocentric position, au + pvh[0][2] z } + + pvh[1][0] xdot } + pvh[1][1] ydot } heliocentric velocity, au/d + pvh[1][2] zdot } + + pvb[0][0] x } + pvb[0][1] y } barycentric position, au + pvb[0][2] z } + + pvb[1][0] xdot } + pvb[1][1] ydot } barycentric velocity, au/d + pvb[1][2] zdot } + + The vectors are with respect to the Barycentric Celestial + Reference System. The time unit is one day in TDB. + + 3) The function is a SIMPLIFIED SOLUTION from the planetary theory + VSOP2000 (X. Moisson, P. Bretagnon, 2001, Celes. Mechanics & + Dyn. Astron., 80, 3/4, 205-213) and is an adaptation of original + Fortran code supplied by P. Bretagnon (private comm., 2000). + + 4) Comparisons over the time span 1900-2100 with this simplified + solution and the JPL DE405 ephemeris give the following results: + + RMS max + Heliocentric: + position error 3.7 11.2 km + velocity error 1.4 5.0 mm/s + + Barycentric: + position error 4.6 13.4 km + velocity error 1.4 4.9 mm/s + + Comparisons with the JPL DE406 ephemeris show that by 1800 and + 2200 the position errors are approximately double their 1900-2100 + size. By 1500 and 2500 the deterioration is a factor of 10 and + by 1000 and 3000 a factor of 60. The velocity accuracy falls off + at about half that rate. + + 5) It is permissible to use the same array for pvh and pvb, which + will receive the barycentric values. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + pvh_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + pvb_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, pvh_out[...,0,0], pvb_out[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epv00(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'epv00') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pvh_out.shape) > 0 and pvh_out.shape[0] == 1 + pvh_out = pvh_out.reshape(pvh_out.shape[1:]) + assert len(pvb_out.shape) > 0 and pvb_out.shape[0] == 1 + pvb_out = pvb_out.reshape(pvb_out.shape[1:]) + + return pvh_out, pvb_out +STATUS_CODES['epv00'] = {0: 'OK', 1: 'warning: date outside the range 1900-2100 AD'} + + + +def plan94(date1, date2, np): + """ + Wrapper for ERFA function ``eraPlan94``. + + Parameters + ---------- + date1 : double array + date2 : double array + np : int array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below.
+ + - - - - - - - - - - + e r a P l a n 9 4 + - - - - - - - - - - + + Approximate heliocentric position and velocity of a nominated major + planet: Mercury, Venus, EMB, Mars, Jupiter, Saturn, Uranus or + Neptune (but not the Earth itself). + + Given: + date1 double TDB date part A (Note 1) + date2 double TDB date part B (Note 1) + np int planet (1=Mercury, 2=Venus, 3=EMB, 4=Mars, + 5=Jupiter, 6=Saturn, 7=Uranus, 8=Neptune) + + Returned (argument): + pv double[2][3] planet p,v (heliocentric, J2000.0, au,au/d) + + Returned (function value): + int status: -1 = illegal NP (outside 1-8) + 0 = OK + +1 = warning: year outside 1000-3000 + +2 = warning: failed to converge + + Notes: + + 1) The date date1+date2 is in the TDB time scale (in practice TT can + be used) and is a Julian Date, apportioned in any convenient way + between the two arguments. For example, JD(TDB)=2450123.7 could + be expressed in any of these ways, among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. The limited + accuracy of the present algorithm is such that any of the methods + is satisfactory. + + 2) If an np value outside the range 1-8 is supplied, an error status + (function value -1) is returned and the pv vector set to zeroes. + + 3) For np=3 the result is for the Earth-Moon Barycenter. To obtain + the heliocentric position and velocity of the Earth, use instead + the ERFA function eraEpv00. + + 4) On successful return, the array pv contains the following: + + pv[0][0] x } + pv[0][1] y } heliocentric position, au + pv[0][2] z } + + pv[1][0] xdot } + pv[1][1] ydot } heliocentric velocity, au/d + pv[1][2] zdot } + + The reference frame is equatorial and is with respect to the + mean equator and equinox of epoch J2000.0. + + 5) The algorithm is due to J.L. Simon, P. Bretagnon, J. Chapront, + M. Chapront-Touze, G. Francou and J. Laskar (Bureau des + Longitudes, Paris, France). From comparisons with JPL + ephemeris DE102, they quote the following maximum errors + over the interval 1800-2050: + + L (arcsec) B (arcsec) R (km) + + Mercury 4 1 300 + Venus 5 1 800 + EMB 6 1 1000 + Mars 17 1 7700 + Jupiter 71 5 76000 + Saturn 81 13 267000 + Uranus 86 7 712000 + Neptune 11 1 253000 + + Over the interval 1000-3000, they report that the accuracy is no + worse than 1.5 times that over 1800-2050. Outside 1000-3000 the + accuracy declines. + + Comparisons of the present function with the JPL DE200 ephemeris + give the following RMS errors over the interval 1960-2025: + + position (km) velocity (m/s) + + Mercury 334 0.437 + Venus 1060 0.855 + EMB 2010 0.815 + Mars 7690 1.98 + Jupiter 71700 7.70 + Saturn 199000 19.4 + Uranus 564000 16.4 + Neptune 158000 14.4 + + Comparisons against DE200 over the interval 1800-2100 gave the + following maximum absolute differences. (The results using + DE406 were essentially the same.) 
+ + L (arcsec) B (arcsec) R (km) Rdot (m/s) + + Mercury 7 1 500 0.7 + Venus 7 1 1100 0.9 + EMB 9 1 1300 1.0 + Mars 26 1 9000 2.5 + Jupiter 78 6 82000 8.2 + Saturn 87 14 263000 24.6 + Uranus 86 7 661000 27.4 + Neptune 11 2 248000 21.4 + + 6) The present ERFA re-implementation of the original Simon et al. + Fortran code differs from the original in the following respects: + + * C instead of Fortran. + + * The date is supplied in two parts. + + * The result is returned only in equatorial Cartesian form; + the ecliptic longitude, latitude and radius vector are not + returned. + + * The result is in the J2000.0 equatorial frame, not ecliptic. + + * More is done in-line: there are fewer calls to subroutines. + + * Different error/warning status values are used. + + * A different Kepler's-equation-solver is used (avoiding + use of double precision complex). + + * Polynomials in t are nested to minimize rounding errors. + + * Explicit double constants are used to avoid mixed-mode + expressions. + + None of the above changes affects the result significantly. + + 7) The returned status indicates the most serious condition + encountered during execution of the function. Illegal np is + considered the most serious, overriding failure to converge, + which in turn takes precedence over the remote date warning. + + Called: + eraAnp normalize angle into range 0 to 2pi + + Reference: Simon, J.L, Bretagnon, P., Chapront, J., + Chapront-Touze, M., Francou, G., and Laskar, J., + Astron. Astrophys. 282, 663 (1994). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + np_in = numpy.array(np, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, np_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, np_in, pv_out[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._plan94(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'plan94') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out +STATUS_CODES['plan94'] = {-1: 'illegal NP (outside 1-8)', 0: 'OK', 1: 'warning: year outside 1000-3000', 2: 'warning: failed to converge'} + + + +def fad03(t): + """ + Wrapper for ERFA function ``eraFad03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a d 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean elongation of the Moon from the Sun. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double D, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fad03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fae03(t): + """ + Wrapper for ERFA function ``eraFae03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a e 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Earth. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Earth, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
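+
+ Illustrative usage (not part of the ERFA documentation; the epoch is
+ arbitrary, and the other fundamental-argument wrappers in this module,
+ such as ``fad03`` or ``fal03``, take the same single argument):
+
+ >>> t = 0.17                 # TDB, Julian centuries since J2000.0 (roughly early 2017)
+ >>> earth_lon = fae03(t)     # mean longitude of Earth, radians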
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fae03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faf03(t): + """ + Wrapper for ERFA function ``eraFaf03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a f 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of the Moon minus mean longitude of the ascending + node. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double F, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faf03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faju03(t): + """ + Wrapper for ERFA function ``eraFaju03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a j u 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Jupiter. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Jupiter, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faju03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fal03(t): + """ + Wrapper for ERFA function ``eraFal03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a l 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean anomaly of the Moon. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double l, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fal03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def falp03(t): + """ + Wrapper for ERFA function ``eraFalp03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a l p 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean anomaly of the Sun. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double l', radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._falp03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fama03(t): + """ + Wrapper for ERFA function ``eraFama03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a m a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Mars. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Mars, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fama03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fame03(t): + """ + Wrapper for ERFA function ``eraFame03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a m e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Mercury. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Mercury, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fame03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fane03(t): + """ + Wrapper for ERFA function ``eraFane03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a n e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Neptune. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Neptune, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is adapted from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fane03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faom03(t): + """ + Wrapper for ERFA function ``eraFaom03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a o m 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of the Moon's ascending node. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double Omega, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faom03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fapa03(t): + """ + Wrapper for ERFA function ``eraFapa03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a p a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + general accumulated precession in longitude. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double general precession in longitude, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003). It + is taken from Kinoshita & Souchay (1990) and comes originally + from Lieske et al. (1977). + + References: + + Kinoshita, H. and Souchay J. 1990, Celest.Mech. and Dyn.Astron. + 48, 187 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fapa03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fasa03(t): + """ + Wrapper for ERFA function ``eraFasa03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a s a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Saturn. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Saturn, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fasa03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faur03(t): + """ + Wrapper for ERFA function ``eraFaur03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a F a u r 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Uranus. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Uranus, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is adapted from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faur03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fave03(t): + """ + Wrapper for ERFA function ``eraFave03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a v e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Venus. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Venus, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fave03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def bi00(): + """ + Wrapper for ERFA function ``eraBi00``. + + Parameters + ---------- + + Returns + ------- + dpsibi : double array + depsbi : double array + dra : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B i 0 0 + - - - - - - - - + + Frame bias components of IAU 2000 precession-nutation models (part + of MHB2000 with additions). + + Returned: + dpsibi,depsbi double longitude and obliquity corrections + dra double the ICRS RA of the J2000.0 mean equinox + + Notes: + + 1) The frame bias corrections in longitude and obliquity (radians) + are required in order to correct for the offset between the GCRS + pole and the mean J2000.0 pole. They define, with respect to the + GCRS frame, a J2000.0 mean pole that is consistent with the rest + of the IAU 2000A precession-nutation model. + + 2) In addition to the displacement of the pole, the complete + description of the frame bias requires also an offset in right + ascension. This is not part of the IAU 2000A model, and is from + Chapront et al. (2002). It is returned in radians. + + 3) This is a supplemented implementation of one aspect of the IAU + 2000A nutation model, formally adopted by the IAU General + Assembly in 2000, namely MHB2000 (Mathews et al. 2002). + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G., Astron. + Astrophys., 387, 700, 2002. + + Mathews, P.M., Herring, T.A., Buffet, B.A., "Modeling of nutation + and precession New nutation series for nonrigid Earth and + insights into the Earth's interior", J.Geophys.Res., 107, B4, + 2002. The MHB2000 code itself was obtained on 9th September 2002 + from ftp://maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ) + dpsibi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + depsbi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dra_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dpsibi_out, depsbi_out, dra_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*0 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bi00(it) + + return dpsibi_out, depsbi_out, dra_out + + +def bp00(date1, date2): + """ + Wrapper for ERFA function ``eraBp00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rb : double array + rp : double array + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B p 0 0 + - - - - - - - - + + Frame bias and precession, IAU 2000. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rb double[3][3] frame bias matrix (Note 2) + rp double[3][3] precession matrix (Note 3) + rbp double[3][3] bias-precession matrix (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 3) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 4) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 5) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraBi00 frame bias components, IAU 2000 + eraPr00 IAU 2000 precession adjustments + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRy rotate around Y-axis + eraRz rotate around Z-axis + eraCr copy r-matrix + eraRxr product of two r-matrices + + Reference: + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bp00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rb_out, rp_out, rbp_out + + +def bp06(date1, date2): + """ + Wrapper for ERFA function ``eraBp06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rb : double array + rp : double array + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B p 0 6 + - - - - - - - - + + Frame bias and precession, IAU 2006. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rb double[3][3] frame bias matrix (Note 2) + rp double[3][3] precession matrix (Note 3) + rbp double[3][3] bias-precession matrix (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 3) The matrix rp transforms vectors from mean J2000.0 to mean of + date by applying precession. + + 4) The matrix rbp transforms vectors from GCRS to mean of date by + applying frame bias then precession. It is the product rp x rb. + + 5) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + eraPmat06 PB matrix, IAU 2006 + eraTr transpose r-matrix + eraRxr product of two r-matrices + eraCr copy r-matrix + + References: + + Capitaine, N. 
& Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bp06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rb_out, rp_out, rbp_out + + +def bpn2xy(rbpn): + """ + Wrapper for ERFA function ``eraBpn2xy``. + + Parameters + ---------- + rbpn : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a B p n 2 x y + - - - - - - - - - - + + Extract from the bias-precession-nutation matrix the X,Y coordinates + of the Celestial Intermediate Pole. + + Given: + rbpn double[3][3] celestial-to-true matrix (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + + Notes: + + 1) The matrix rbpn transforms vectors from GCRS to true equator (and + CIO or equinox) of date, and therefore the Celestial Intermediate + Pole unit vector is the bottom row of the matrix. + + 2) The arguments x,y are components of the Celestial Intermediate + Pole unit vector in the Geocentric Celestial Reference System. + + Reference: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rbpn_in[...,0,0]) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rbpn_in[...,0,0], x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bpn2xy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def c2i00a(date1, date2): + """ + Wrapper for ERFA function ``eraC2i00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 0 a + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2000A precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraC2i00b function. + + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraC2ibpn celestial-to-intermediate matrix, given NPB matrix + + References: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. 
(eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2i00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2i00b(date1, date2): + """ + Wrapper for ERFA function ``eraC2i00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 0 b + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2000B precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraC2i00a function. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraC2ibpn celestial-to-intermediate matrix, given NPB matrix + + References: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. 
The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2i00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2i06a(date1, date2): + """ + Wrapper for ERFA function ``eraC2i06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 6 a + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2006 precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = RC2T * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + + References: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 
32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2i06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2ibpn(date1, date2, rbpn): + """ + Wrapper for ERFA function ``eraC2ibpn``. + + Parameters + ---------- + date1 : double array + date2 : double array + rbpn : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i b p n + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date given + the bias-precession-nutation matrix. IAU 2000. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + rbpn double[3][3] celestial-to-true matrix (Note 2) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 3) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rbpn transforms vectors from GCRS to true equator (and + CIO or equinox) of date. Only the CIP (bottom row) is used. + + 3) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = RC2T * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 4) Although its name does not include "00", This function is in fact + specific to the IAU 2000 models. 
+ + Called: + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraC2ixy celestial-to-intermediate matrix, given X,Y + + References: + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, rbpn_in[...,0,0]) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_in[...,0,0], rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2ibpn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2ixy(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraC2ixy``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 i x y + - - - - - - - - - + + Form the celestial to intermediate-frame-of-date matrix for a given + date when the CIP X,Y coordinates are known. IAU 2000. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double Celestial Intermediate Pole (Note 2) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 3) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y components + of the unit vector in the Geocentric Celestial Reference System. 
+ + 3) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = RC2T * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 4) Although its name does not include "00", This function is in fact + specific to the IAU 2000 models. + + Called: + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + eraS00 the CIO locator s, given X,Y, IAU 2000A + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_in, y_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2ixy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2ixys(x, y, s): + """ + Wrapper for ERFA function ``eraC2ixys``. + + Parameters + ---------- + x : double array + y : double array + s : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i x y s + - - - - - - - - - - + + Form the celestial to intermediate-frame-of-date matrix given the CIP + X,Y and the CIO locator s. + + Given: + x,y double Celestial Intermediate Pole (Note 1) + s double the CIO locator s (Note 2) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 3) + + Notes: + + 1) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 2) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 3) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = RC2T * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. 
+ + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), x_in, y_in, s_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [x_in, y_in, s_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2ixys(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2t00a(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t00a``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 0 a + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2000A nutation model. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. 
+ + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + 4) A faster, but slightly less accurate result (about 1 mas), can + be obtained by using instead the eraC2t00b function. + + Called: + eraC2i00a celestial-to-intermediate matrix, IAU 2000A + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2t00b(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t00b``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 0 b + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2000B nutation model. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. 
For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + 4) The present function is faster, but slightly less accurate (about + 1 mas), than the eraC2t00a function. + + Called: + eraC2i00b celestial-to-intermediate matrix, IAU 2000B + eraEra00 Earth rotation angle, IAU 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
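+
+    Examples
+    --------
+    A minimal usage sketch; the epochs and the zero polar motion are
+    illustrative placeholders, not test values:
+
+    >>> import numpy as np
+    >>> utb = 53736.0 + np.linspace(0.0, 1.0, 4)     # four UT1 epochs
+    >>> rc2t = c2t00b(2400000.5, 53736.0, 2400000.5, utb, 0.0, 0.0)
+    >>> rc2t.shape                                   # one matrix per epoch
+    (4, 3, 3)
+
+    Scalar and array arguments broadcast against each other following the
+    usual numpy rules.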
+ + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2t06a(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t06a``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 6 a + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2006 precession and IAU 2000A + nutation models. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. 
+ + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + Called: + eraC2i06a celestial-to-intermediate matrix, IAU 2006/2000A + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2tcio(rc2i, era, rpom): + """ + Wrapper for ERFA function ``eraC2tcio``. + + Parameters + ---------- + rc2i : double array + era : double array + rpom : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t c i o + - - - - - - - - - - + + Assemble the celestial to terrestrial matrix from CIO-based + components (the celestial-to-intermediate matrix, the Earth Rotation + Angle and the polar motion matrix). + + Given: + rc2i double[3][3] celestial-to-intermediate matrix + era double Earth rotation angle (radians) + rpom double[3][3] polar-motion matrix + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix + + Notes: + + 1) This function constructs the rotation matrix that transforms + vectors in the celestial system into vectors in the terrestrial + system. It does so starting from precomputed components, namely + the matrix which rotates from celestial coordinates to the + intermediate frame, the Earth rotation angle and the polar motion + matrix. 
One use of the present function is when generating a + series of celestial-to-terrestrial matrices where only the Earth + Rotation Angle changes, avoiding the considerable overhead of + recomputing the precession-nutation more often than necessary to + achieve given accuracy objectives. + + 2) The relationship between the arguments is as follows: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003). + + Called: + eraCr copy r-matrix + eraRz rotate around Z-axis + eraRxr product of two r-matrices + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rc2i_in = numpy.array(rc2i, dtype=numpy.double, order="C", copy=False, subok=True) + era_in = numpy.array(era, dtype=numpy.double, order="C", copy=False, subok=True) + rpom_in = numpy.array(rpom, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rc2i_in, (3, 3), "rc2i") + check_trailing_shape(rpom_in, (3, 3), "rpom") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc2i_in[...,0,0], era_in, rpom_in[...,0,0]) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc2i_in[...,0,0], era_in, rpom_in[...,0,0], rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2tcio(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2teqx(rbpn, gst, rpom): + """ + Wrapper for ERFA function ``eraC2teqx``. + + Parameters + ---------- + rbpn : double array + gst : double array + rpom : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t e q x + - - - - - - - - - - + + Assemble the celestial to terrestrial matrix from equinox-based + components (the celestial-to-true matrix, the Greenwich Apparent + Sidereal Time and the polar motion matrix). + + Given: + rbpn double[3][3] celestial-to-true matrix + gst double Greenwich (apparent) Sidereal Time (radians) + rpom double[3][3] polar-motion matrix + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 2) + + Notes: + + 1) This function constructs the rotation matrix that transforms + vectors in the celestial system into vectors in the terrestrial + system. It does so starting from precomputed components, namely + the matrix which rotates from celestial coordinates to the + true equator and equinox of date, the Greenwich Apparent Sidereal + Time and the polar motion matrix. 
One use of the present function + is when generating a series of celestial-to-terrestrial matrices + where only the Sidereal Time changes, avoiding the considerable + overhead of recomputing the precession-nutation more often than + necessary to achieve given accuracy objectives. + + 2) The relationship between the arguments is as follows: + + [TRS] = rpom * R_3(gst) * rbpn * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003). + + Called: + eraCr copy r-matrix + eraRz rotate around Z-axis + eraRxr product of two r-matrices + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + gst_in = numpy.array(gst, dtype=numpy.double, order="C", copy=False, subok=True) + rpom_in = numpy.array(rpom, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + check_trailing_shape(rpom_in, (3, 3), "rpom") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rbpn_in[...,0,0], gst_in, rpom_in[...,0,0]) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rbpn_in[...,0,0], gst_in, rpom_in[...,0,0], rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2teqx(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2tpe(tta, ttb, uta, utb, dpsi, deps, xp, yp): + """ + Wrapper for ERFA function ``eraC2tpe``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + dpsi : double array + deps : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 t p e + - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1, + the nutation and the polar motion. IAU 2000. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + xp,yp double coordinates of the pole (radians, Note 3) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 4) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. 
For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. For high-accuracy + applications, free core nutation should be included as well as + any other relevant corrections to the position of the CIP. + + 3) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 4) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(GST) * RBPN * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RBPN is the + bias-precession-nutation matrix, GST is the Greenwich (apparent) + Sidereal Time and RPOM is the polar motion matrix. + + 5) Although its name does not include "00", This function is in fact + specific to the IAU 2000 models. + + Called: + eraPn00 bias/precession/nutation results, IAU 2000 + eraGmst00 Greenwich mean sidereal time, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraEe00 equation of the equinoxes, IAU 2000 + eraPom00 polar motion matrix + eraC2teqx form equinox-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
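+
+    Examples
+    --------
+    A minimal usage sketch; the nutation components and zero polar motion
+    below are illustrative placeholders (a real application would take
+    dpsi, deps from a nutation model plus any CIP corrections):
+
+    >>> dpsi, deps = -9.6e-5, 4.1e-5                 # nutation in longitude/obliquity (radians)
+    >>> rc2t = c2tpe(2400000.5, 53736.0, 2400000.5, 53736.0,
+    ...              dpsi, deps, 0.0, 0.0)           # a single 3x3 matrix
+
+    Array-valued arguments broadcast, giving one matrix per broadcast element.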
+ + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, dpsi_in, deps_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, dpsi_in, deps_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2tpe(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2txy(tta, ttb, uta, utb, x, y, xp, yp): + """ + Wrapper for ERFA function ``eraC2txy``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + x : double array + y : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 t x y + - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1, + the CIP coordinates and the polar motion. IAU 2000. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + x,y double Celestial Intermediate Pole (Note 2) + xp,yp double coordinates of the pole (radians, Note 3) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 4) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any o + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. 
+ + 2) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 3) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 4) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 5) Although its name does not include "00", This function is in fact + specific to the IAU 2000 models. + + Called: + eraC2ixy celestial-to-intermediate matrix, given X,Y + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, x_in, y_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, x_in, y_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2txy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def eo06a(date1, date2): + """ + Wrapper for ERFA function ``eraEo06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E o 0 6 a + - - - - - - - - - + + Equation of the origins, IAU 2006 precession and IAU 2000A nutation. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the origins in radians + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The equation of the origins is the distance between the true + equinox and the celestial intermediate origin and, equivalently, + the difference between Earth rotation angle and Greenwich + apparent sidereal time (ERA-GST). It comprises the precession + (since J2000.0) in right ascension plus the equation of the + equinoxes (including the small correction terms). + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraEors equation of the origins, given NPB matrix and s + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eo06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eors(rnpb, s): + """ + Wrapper for ERFA function ``eraEors``. + + Parameters + ---------- + rnpb : double array + s : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a E o r s + - - - - - - - - + + Equation of the origins, given the classical NPB matrix and the + quantity s. + + Given: + rnpb double[3][3] classical nutation x precession x bias matrix + s double the quantity s (the CIO locator) + + Returned (function value): + double the equation of the origins in radians. 
+ + Notes: + + 1) The equation of the origins is the distance between the true + equinox and the celestial intermediate origin and, equivalently, + the difference between Earth rotation angle and Greenwich + apparent sidereal time (ERA-GST). It comprises the precession + (since J2000.0) in right ascension plus the equation of the + equinoxes (including the small correction terms). + + 2) The algorithm is from Wallace & Capitaine (2006). + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rnpb_in = numpy.array(rnpb, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rnpb_in, (3, 3), "rnpb") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rnpb_in[...,0,0], s_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rnpb_in[...,0,0], s_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eors(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fw2m(gamb, phib, psi, eps): + """ + Wrapper for ERFA function ``eraFw2m``. + + Parameters + ---------- + gamb : double array + phib : double array + psi : double array + eps : double array + + Returns + ------- + r : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a F w 2 m + - - - - - - - - + + Form rotation matrix given the Fukushima-Williams angles. + + Given: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psi double F-W angle psi (radians) + eps double F-W angle epsilon (radians) + + Returned: + r double[3][3] rotation matrix + + Notes: + + 1) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole, + E = ecliptic pole of date, + and P = CIP, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma = epE + phib = phi = pE + psi = psi = pEP + eps = epsilon = EP + + 2) The matrix representing the combined effects of frame bias, + precession and nutation is: + + NxPxB = R_1(-eps).R_3(-psi).R_1(phib).R_3(gamb) + + 3) Three different matrices can be constructed, depending on the + supplied angles: + + o To obtain the nutation x precession x frame bias matrix, + generate the four precession angles, generate the nutation + components and add them to the psi_bar and epsilon_A angles, + and call the present function. + + o To obtain the precession x frame bias matrix, generate the + four precession angles and call the present function. + + o To obtain the frame bias matrix, generate the four precession + angles for date J2000.0 and call the present function. 
+ + The nutation-only and precession-only matrices can if necessary + be obtained by combining these three appropriately. + + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRx rotate around X-axis + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + gamb_in = numpy.array(gamb, dtype=numpy.double, order="C", copy=False, subok=True) + phib_in = numpy.array(phib, dtype=numpy.double, order="C", copy=False, subok=True) + psi_in = numpy.array(psi, dtype=numpy.double, order="C", copy=False, subok=True) + eps_in = numpy.array(eps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), gamb_in, phib_in, psi_in, eps_in) + r_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [gamb_in, phib_in, psi_in, eps_in, r_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fw2m(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + + return r_out + + +def fw2xy(gamb, phib, psi, eps): + """ + Wrapper for ERFA function ``eraFw2xy``. + + Parameters + ---------- + gamb : double array + phib : double array + psi : double array + eps : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F w 2 x y + - - - - - - - - - + + CIP X,Y given Fukushima-Williams bias-precession-nutation angles. + + Given: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psi double F-W angle psi (radians) + eps double F-W angle epsilon (radians) + + Returned: + x,y double CIP unit vector X,Y + + Notes: + + 1) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole + E = ecliptic pole of date, + and P = CIP, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma = epE + phib = phi = pE + psi = psi = pEP + eps = epsilon = EP + + 2) The matrix representing the combined effects of frame bias, + precession and nutation is: + + NxPxB = R_1(-epsA).R_3(-psi).R_1(phib).R_3(gamb) + + The returned values x,y are elements [2][0] and [2][1] of the + matrix. Near J2000.0, they are essentially angles in radians. + + Called: + eraFw2m F-W angles to r-matrix + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
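+
+    Illustrative wrapper usage (not part of the ERFA documentation; the
+    angle values below are arbitrary placeholders, in radians):
+
+    >>> import numpy as np
+    >>> x, y = fw2xy(0.0, 0.0, 0.0, 0.0)             # scalar angles -> 0-d arrays
+    >>> xs, ys = fw2xy(np.zeros(5), 0.0, 0.0, 0.0)   # broadcasts -> shape (5,)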
+ + """ + + #Turn all inputs into arrays + gamb_in = numpy.array(gamb, dtype=numpy.double, order="C", copy=False, subok=True) + phib_in = numpy.array(phib, dtype=numpy.double, order="C", copy=False, subok=True) + psi_in = numpy.array(psi, dtype=numpy.double, order="C", copy=False, subok=True) + eps_in = numpy.array(eps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), gamb_in, phib_in, psi_in, eps_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [gamb_in, phib_in, psi_in, eps_in, x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fw2xy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def ltp(epj): + """ + Wrapper for ERFA function ``eraLtp``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a L t p + - - - - - - - + + Long-term precession matrix. + + Given: + epj double Julian epoch (TT) + + Returned: + rp double[3][3] precession matrix, J2000.0 to date + + Notes: + + 1) The matrix is in the sense + + P_date = rp x P_J2000, + + where P_J2000 is a vector with respect to the J2000.0 mean + equator and equinox and P_date is the same vector with respect to + the equator and equinox of epoch epj. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraLtpequ equator pole, long term + eraLtpecl ecliptic pole, long term + eraPxp vector product + eraPn normalize vector + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + + return rp_out + + +def ltpb(epj): + """ + Wrapper for ERFA function ``eraLtpb``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rpb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a L t p b + - - - - - - - - + + Long-term precession matrix, including ICRS frame bias. + + Given: + epj double Julian epoch (TT) + + Returned: + rpb double[3][3] precession-bias matrix, J2000.0 to date + + Notes: + + 1) The matrix is in the sense + + P_date = rpb x P_ICRS, + + where P_ICRS is a vector in the Geocentric Celestial Reference + System, and P_date is the vector with respect to the Celestial + Intermediate Reference System at that date but with nutation + neglected. + + 2) A first order frame bias formulation is used, of sub- + microarcsecond accuracy compared with a full 3D rotation. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rpb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rpb_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpb_out.shape) > 0 and rpb_out.shape[0] == 1 + rpb_out = rpb_out.reshape(rpb_out.shape[1:]) + + return rpb_out + + +def ltpecl(epj): + """ + Wrapper for ERFA function ``eraLtpecl``. + + Parameters + ---------- + epj : double array + + Returns + ------- + vec : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t p e c l + - - - - - - - - - - + + Long-term precession of the ecliptic. + + Given: + epj double Julian epoch (TT) + + Returned: + vec double[3] ecliptic pole unit vector + + Notes: + + 1) The returned vector is with respect to the J2000.0 mean equator + and equinox. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + vec_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, vec_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpecl(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(vec_out.shape) > 0 and vec_out.shape[0] == 1 + vec_out = vec_out.reshape(vec_out.shape[1:]) + + return vec_out + + +def ltpequ(epj): + """ + Wrapper for ERFA function ``eraLtpequ``. + + Parameters + ---------- + epj : double array + + Returns + ------- + veq : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a L t p e q u + - - - - - - - - - - + + Long-term precession of the equator. + + Given: + epj double Julian epoch (TT) + + Returned: + veq double[3] equator pole unit vector + + Notes: + + 1) The returned vector is with respect to the J2000.0 mean equator + and equinox. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + veq_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, veq_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpequ(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(veq_out.shape) > 0 and veq_out.shape[0] == 1 + veq_out = veq_out.reshape(veq_out.shape[1:]) + + return veq_out + + +def num00a(date1, date2): + """ + Wrapper for ERFA function ``eraNum00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 0 a + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraNum00b function. + + Called: + eraPn00a bias/precession/nutation, IAU 2000A + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def num00b(date1, date2): + """ + Wrapper for ERFA function ``eraNum00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 0 b + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraNum00a function. + + Called: + eraPn00b bias/precession/nutation, IAU 2000B + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. 
Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def num06a(date1, date2): + """ + Wrapper for ERFA function ``eraNum06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 6 a + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2006/2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + Called: + eraObl06 mean obliquity, IAU 2006 + eraNut06a nutation, IAU 2006/2000A + eraNumat form nutation matrix + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
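+
+    Illustrative wrapper usage (not part of the ERFA documentation; the
+    date and test vector are arbitrary placeholders):
+
+    >>> import numpy as np
+    >>> rmatn = num06a(2451545.0, 0.0)         # TT J2000.0 as a 2-part JD -> (3, 3)
+    >>> v_mean = np.array([0.0, 0.0, 1.0])     # placeholder mean-of-date p-vector
+    >>> v_true = np.dot(rmatn, v_mean)         # sense: V(true) = rmatn * V(mean)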
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def numat(epsa, dpsi, deps): + """ + Wrapper for ERFA function ``eraNumat``. + + Parameters + ---------- + epsa : double array + dpsi : double array + deps : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a N u m a t + - - - - - - - - - + + Form the matrix of nutation. + + Given: + epsa double mean obliquity of date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + rmatn double[3][3] nutation matrix (Note 3) + + Notes: + + + 1) The supplied mean obliquity epsa, must be consistent with the + precession-nutation models from which dpsi and deps were obtained. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. + + 3) The matrix operates in the sense V(true) = rmatn * V(mean), + where the p-vector V(true) is with respect to the true + equatorial triad of date and the p-vector V(mean) is with + respect to the mean equatorial triad of date. + + Called: + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRz rotate around Z-axis + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epsa_in = numpy.array(epsa, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epsa_in, dpsi_in, deps_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epsa_in, dpsi_in, deps_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._numat(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def nut00a(date1, date2): + """ + Wrapper for ERFA function ``eraNut00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 0 a + - - - - - - - - - - + + Nutation, IAU 2000A model (MHB2000 luni-solar and planetary nutation + with free core nutation omitted). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the equinox and ecliptic of date. The + obliquity at J2000.0 is assumed to be the Lieske et al. (1977) + value of 84381.448 arcsec. + + Both the luni-solar and planetary nutations are included. The + latter are due to direct planetary nutations and the + perturbations of the lunar and terrestrial orbits. + + 3) The function computes the MHB2000 nutation series with the + associated corrections for planetary nutations. It is an + implementation of the nutation part of the IAU 2000A precession- + nutation model, formally adopted by the IAU General Assembly in + 2000, namely MHB2000 (Mathews et al. 2002), but with the free + core nutation (FCN - see Note 4) omitted. + + 4) The full MHB2000 model also contains contributions to the + nutations in longitude and obliquity due to the free-excitation + of the free-core-nutation during the period 1979-2000. 
These FCN + terms, which are time-dependent and unpredictable, are NOT + included in the present function and, if required, must be + independently computed. With the FCN corrections included, the + present function delivers a pole which is at current epochs + accurate to a few hundred microarcseconds. The omission of FCN + introduces further errors of about that size. + + 5) The present function provides classical nutation. The MHB2000 + algorithm, from which it is adapted, deals also with (i) the + offsets between the GCRS and mean poles and (ii) the adjustments + in longitude and obliquity due to the changed precession rates. + These additional functions, namely frame bias and precession + adjustments, are supported by the ERFA functions eraBi00 and + eraPr00. + + 6) The MHB2000 algorithm also provides "total" nutations, comprising + the arithmetic sum of the frame bias, precession adjustments, + luni-solar nutation and planetary nutation. These total + nutations can be used in combination with an existing IAU 1976 + precession implementation, such as eraPmat76, to deliver GCRS- + to-true predictions of sub-mas accuracy at current dates. + However, there are three shortcomings in the MHB2000 model that + must be taken into account if more accurate or definitive results + are required (see Wallace 2002): + + (i) The MHB2000 total nutations are simply arithmetic sums, + yet in reality the various components are successive Euler + rotations. This slight lack of rigor leads to cross terms + that exceed 1 mas after a century. The rigorous procedure + is to form the GCRS-to-true rotation matrix by applying the + bias, precession and nutation in that order. + + (ii) Although the precession adjustments are stated to be with + respect to Lieske et al. (1977), the MHB2000 model does + not specify which set of Euler angles are to be used and + how the adjustments are to be applied. The most literal + and straightforward procedure is to adopt the 4-rotation + epsilon_0, psi_A, omega_A, xi_A option, and to add DPSIPR + to psi_A and DEPSPR to both omega_A and eps_A. + + (iii) The MHB2000 model predates the determination by Chapront + et al. (2002) of a 14.6 mas displacement between the + J2000.0 mean equinox and the origin of the ICRS frame. It + should, however, be noted that neglecting this displacement + when calculating star coordinates does not lead to a + 14.6 mas change in right ascension, only a small second- + order distortion in the pattern of the precession-nutation + effect. + + For these reasons, the ERFA functions do not generate the "total + nutations" directly, though they can of course easily be + generated by calling eraBi00, eraPr00 and the present function + and adding the results. + + 7) The MHB2000 model contains 41 instances where the same frequency + appears multiple times, of which 38 are duplicates and three are + triplicates. To keep the present code close to the original MHB + algorithm, this small inefficiency has not been corrected. + + Called: + eraFal03 mean anomaly of the Moon + eraFaf03 mean argument of the latitude of the Moon + eraFaom03 mean longitude of the Moon's ascending node + eraFame03 mean longitude of Mercury + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFama03 mean longitude of Mars + eraFaju03 mean longitude of Jupiter + eraFasa03 mean longitude of Saturn + eraFaur03 mean longitude of Uranus + eraFapa03 general accumulated precession in longitude + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G. 
2002, + Astron.Astrophys. 387, 700 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + Mathews, P.M., Herring, T.A., Buffet, B.A. 2002, J.Geophys.Res. + 107, B4. The MHB_2000 code itself was obtained on 9th September + 2002 from ftp//maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut00b(date1, date2): + """ + Wrapper for ERFA function ``eraNut00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 0 b + - - - - - - - - - - + + Nutation, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the equinox and ecliptic of date. The + obliquity at J2000.0 is assumed to be the Lieske et al. 
(1977) + value of 84381.448 arcsec. (The errors that result from using + this function with the IAU 2006 value of 84381.406 arcsec can be + neglected.) + + The nutation model consists only of luni-solar terms, but + includes also a fixed offset which compensates for certain long- + period planetary terms (Note 7). + + 3) This function is an implementation of the IAU 2000B abridged + nutation model formally adopted by the IAU General Assembly in + 2000. The function computes the MHB_2000_SHORT luni-solar + nutation series (Luzum 2001), but without the associated + corrections for the precession rate adjustments and the offset + between the GCRS and J2000.0 mean poles. + + 4) The full IAU 2000A (MHB2000) nutation model contains nearly 1400 + terms. The IAU 2000B model (McCarthy & Luzum 2003) contains only + 77 terms, plus additional simplifications, yet still delivers + results of 1 mas accuracy at present epochs. This combination of + accuracy and size makes the IAU 2000B abridged nutation model + suitable for most practical applications. + + The function delivers a pole accurate to 1 mas from 1900 to 2100 + (usually better than 1 mas, very occasionally just outside + 1 mas). The full IAU 2000A model, which is implemented in the + function eraNut00a (q.v.), delivers considerably greater accuracy + at current dates; however, to realize this improved accuracy, + corrections for the essentially unpredictable free-core-nutation + (FCN) must also be included. + + 5) The present function provides classical nutation. The + MHB_2000_SHORT algorithm, from which it is adapted, deals also + with (i) the offsets between the GCRS and mean poles and (ii) the + adjustments in longitude and obliquity due to the changed + precession rates. These additional functions, namely frame bias + and precession adjustments, are supported by the ERFA functions + eraBi00 and eraPr00. + + 6) The MHB_2000_SHORT algorithm also provides "total" nutations, + comprising the arithmetic sum of the frame bias, precession + adjustments, and nutation (luni-solar + planetary). These total + nutations can be used in combination with an existing IAU 1976 + precession implementation, such as eraPmat76, to deliver GCRS- + to-true predictions of mas accuracy at current epochs. However, + for symmetry with the eraNut00a function (q.v. for the reasons), + the ERFA functions do not generate the "total nutations" + directly. Should they be required, they could of course easily + be generated by calling eraBi00, eraPr00 and the present function + and adding the results. + + 7) The IAU 2000B model includes "planetary bias" terms that are + fixed in size but compensate for long-period nutations. The + amplitudes quoted in McCarthy & Luzum (2003), namely + Dpsi = -1.5835 mas and Depsilon = +1.6339 mas, are optimized for + the "total nutations" method described in Note 6. The Luzum + (2001) values used in this ERFA implementation, namely -0.135 mas + and +0.388 mas, are optimized for the "rigorous" method, where + frame bias, precession and nutation are applied separately and in + that order. During the interval 1995-2050, the ERFA + implementation delivers a maximum error of 1.001 mas (not + including FCN). + + References: + + Lieske, J.H., Lederle, T., Fricke, W., Morando, B., "Expressions + for the precession quantities based upon the IAU /1976/ system of + astronomical constants", Astron.Astrophys. 58, 1-2, 1-16. (1977) + + Luzum, B., private communication, 2001 (Fortran code + MHB_2000_SHORT) + + McCarthy, D.D. 
& Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Cel.Mech.Dyn.Astron. + 85, 37-49 (2003) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J., Astron.Astrophys. 282, 663-683 (1994) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut06a(date1, date2): + """ + Wrapper for ERFA function ``eraNut06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 6 a + - - - - - - - - - - + + IAU 2000A nutation with adjustments to match the IAU 2006 + precession. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the mean equinox and ecliptic of date, + IAU 2006 precession model (Hilton et al. 2006, Capitaine et al. + 2005). + + 3) The function first computes the IAU 2000A nutation, then applies + adjustments for (i) the consequences of the change in obliquity + from the IAU 1980 ecliptic to the IAU 2006 ecliptic and (ii) the + secular variation in the Earth's dynamical form factor J2. 
+ + 4) The present function provides classical nutation, complementing + the IAU 2000 frame bias and IAU 2006 precession. It delivers a + pole which is at current epochs accurate to a few tens of + microarcseconds, apart from the free core nutation. + + Called: + eraNut00a nutation, IAU 2000A + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G. 2002, + Astron.Astrophys. 387, 700 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + Mathews, P.M., Herring, T.A., Buffet, B.A. 2002, J.Geophys.Res. + 107, B4. The MHB_2000 code itself was obtained on 9th September + 2002 from ftp//maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut80(date1, date2): + """ + Wrapper for ERFA function ``eraNut80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a N u t 8 0 + - - - - - - - - - + + Nutation, IAU 1980 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi double nutation in longitude (radians) + deps double nutation in obliquity (radians) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components are with respect to the ecliptic of + date. + + Called: + eraAnpm normalize angle into range +/- pi + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222 (p111). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nutm80(date1, date2): + """ + Wrapper for ERFA function ``eraNutm80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t m 8 0 + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 1980 model. + + Given: + date1,date2 double TDB date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(true) = rmatn * V(mean), + where the p-vector V(true) is with respect to the true + equatorial triad of date and the p-vector V(mean) is with + respect to the mean equatorial triad of date. 
+ + Called: + eraNut80 nutation, IAU 1980 + eraObl80 mean obliquity, IAU 1980 + eraNumat form nutation matrix + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nutm80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def obl06(date1, date2): + """ + Wrapper for ERFA function ``eraObl06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a O b l 0 6 + - - - - - - - - - + + Mean obliquity of the ecliptic, IAU 2006 precession model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double obliquity of the ecliptic (radians, Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result is the angle between the ecliptic and mean equator of + date date1+date2. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
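+
+    Illustrative wrapper usage (not part of the ERFA documentation; the
+    date is arbitrary):
+
+    >>> import numpy as np
+    >>> eps = obl06(2451545.0, 0.0)            # TT J2000.0 -> obliquity in radians
+    >>> eps_deg = np.degrees(eps)              # roughly 23.44 degrees near J2000.0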
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._obl06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def obl80(date1, date2): + """ + Wrapper for ERFA function ``eraObl80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a O b l 8 0 + - - - - - - - - - + + Mean obliquity of the ecliptic, IAU 1980 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double obliquity of the ecliptic (radians, Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result is the angle between the ecliptic and mean equator of + date date1+date2. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Expression 3.222-1 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._obl80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def p06e(date1, date2): + """ + Wrapper for ERFA function ``eraP06e``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + eps0 : double array + psia : double array + oma : double array + bpa : double array + bqa : double array + pia : double array + bpia : double array + epsa : double array + chia : double array + za : double array + zetaa : double array + thetaa : double array + pa : double array + gam : double array + phi : double array + psi : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P 0 6 e + - - - - - - - - + + Precession angles, IAU 2006, equinox based. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (see Note 2): + eps0 double epsilon_0 + psia double psi_A + oma double omega_A + bpa double P_A + bqa double Q_A + pia double pi_A + bpia double Pi_A + epsa double obliquity epsilon_A + chia double chi_A + za double z_A + zetaa double zeta_A + thetaa double theta_A + pa double p_A + gam double F-W angle gamma_J2000 + phi double F-W angle phi_J2000 + psi double F-W angle psi_J2000 + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) This function returns the set of equinox based angles for the + Capitaine et al. "P03" precession theory, adopted by the IAU in + 2006. The angles are set out in Table 1 of Hilton et al. 
(2006): + + eps0 epsilon_0 obliquity at J2000.0 + psia psi_A luni-solar precession + oma omega_A inclination of equator wrt J2000.0 ecliptic + bpa P_A ecliptic pole x, J2000.0 ecliptic triad + bqa Q_A ecliptic pole -y, J2000.0 ecliptic triad + pia pi_A angle between moving and J2000.0 ecliptics + bpia Pi_A longitude of ascending node of the ecliptic + epsa epsilon_A obliquity of the ecliptic + chia chi_A planetary precession + za z_A equatorial precession: -3rd 323 Euler angle + zetaa zeta_A equatorial precession: -1st 323 Euler angle + thetaa theta_A equatorial precession: 2nd 323 Euler angle + pa p_A general precession + gam gamma_J2000 J2000.0 RA difference of ecliptic poles + phi phi_J2000 J2000.0 codeclination of ecliptic pole + psi psi_J2000 longitude difference of equator poles, J2000.0 + + The returned values are all radians. + + 3) Hilton et al. (2006) Table 1 also contains angles that depend on + models distinct from the P03 precession theory itself, namely the + IAU 2000A frame bias and nutation. The quoted polynomials are + used in other ERFA functions: + + . eraXy06 contains the polynomial parts of the X and Y series. + + . eraS06 contains the polynomial part of the s+XY/2 series. + + . eraPfw06 implements the series for the Fukushima-Williams + angles that are with respect to the GCRS pole (i.e. the variants + that include frame bias). + + 4) The IAU resolution stipulated that the choice of parameterization + was left to the user, and so an IAU compliant precession + implementation can be constructed using various combinations of + the angles returned by the present function. + + 5) The parameterization used by ERFA is the version of the Fukushima- + Williams angles that refers directly to the GCRS pole. These + angles may be calculated by calling the function eraPfw06. ERFA + also supports the direct computation of the CIP GCRS X,Y by + series, available by calling eraXy06. + + 6) The agreement between the different parameterizations is at the + 1 microarcsecond level in the present era. + + 7) When constructing a precession formulation that refers to the GCRS + pole rather than the dynamical pole, it may (depending on the + choice of angles) be necessary to introduce the frame bias + explicitly. + + 8) It is permissible to re-use the same variable in the returned + arguments. The quantities are stored in the stated order. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Called: + eraObl06 mean obliquity, IAU 2006 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
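+
+     Illustrative usage (an addition to the ERFA text above; a minimal sketch):
+
+     >>> angles = p06e(2451545.0, 0.0)
+     >>> len(angles)                     # the 16 angles, in the documented order
+     16
+     >>> eps0, psia, oma = angles[:3]    # e.g. epsilon_0, psi_A, omega_A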
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + eps0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + oma_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bpa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bqa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bpia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + chia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + za_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zetaa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + thetaa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + gam_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, eps0_out, psia_out, oma_out, bpa_out, bqa_out, pia_out, bpia_out, epsa_out, chia_out, za_out, zetaa_out, thetaa_out, pa_out, gam_out, phi_out, psi_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*16 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._p06e(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(eps0_out.shape) > 0 and eps0_out.shape[0] == 1 + eps0_out = eps0_out.reshape(eps0_out.shape[1:]) + assert len(psia_out.shape) > 0 and psia_out.shape[0] == 1 + psia_out = psia_out.reshape(psia_out.shape[1:]) + assert len(oma_out.shape) > 0 and oma_out.shape[0] == 1 + oma_out = oma_out.reshape(oma_out.shape[1:]) + assert len(bpa_out.shape) > 0 and bpa_out.shape[0] == 1 + bpa_out = bpa_out.reshape(bpa_out.shape[1:]) + assert len(bqa_out.shape) > 0 and bqa_out.shape[0] == 1 + bqa_out = bqa_out.reshape(bqa_out.shape[1:]) + assert len(pia_out.shape) > 0 and pia_out.shape[0] == 1 + pia_out = pia_out.reshape(pia_out.shape[1:]) + assert len(bpia_out.shape) > 0 and bpia_out.shape[0] == 1 + bpia_out = bpia_out.reshape(bpia_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(chia_out.shape) > 0 and chia_out.shape[0] == 1 + chia_out = chia_out.reshape(chia_out.shape[1:]) + assert len(za_out.shape) > 0 and za_out.shape[0] == 1 + za_out = za_out.reshape(za_out.shape[1:]) + assert len(zetaa_out.shape) > 0 and zetaa_out.shape[0] == 1 + zetaa_out = zetaa_out.reshape(zetaa_out.shape[1:]) + assert len(thetaa_out.shape) > 0 and thetaa_out.shape[0] == 1 + thetaa_out = thetaa_out.reshape(thetaa_out.shape[1:]) + assert len(pa_out.shape) > 0 and pa_out.shape[0] == 1 + pa_out = pa_out.reshape(pa_out.shape[1:]) + assert len(gam_out.shape) > 0 
and gam_out.shape[0] == 1 + gam_out = gam_out.reshape(gam_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(psi_out.shape) > 0 and psi_out.shape[0] == 1 + psi_out = psi_out.reshape(psi_out.shape[1:]) + + return eps0_out, psia_out, oma_out, bpa_out, bqa_out, pia_out, bpia_out, epsa_out, chia_out, za_out, zetaa_out, thetaa_out, pa_out, gam_out, phi_out, psi_out + + +def pb06(date1, date2): + """ + Wrapper for ERFA function ``eraPb06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + bzeta : double array + bz : double array + btheta : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P b 0 6 + - - - - - - - - + + This function forms three Euler angles which implement general + precession from epoch J2000.0, using the IAU 2006 model. Frame + bias (the offset between ICRS and mean J2000.0) is included. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + bzeta double 1st rotation: radians cw around z + bz double 3rd rotation: radians cw around z + btheta double 2nd rotation: radians ccw around y + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The traditional accumulated precession angles zeta_A, z_A, + theta_A cannot be obtained in the usual way, namely through + polynomial expressions, because of the frame bias. The latter + means that two of the angles undergo rapid changes near this + date. They are instead the results of decomposing the + precession-bias matrix obtained by using the Fukushima-Williams + method, which does not suffer from the problem. The + decomposition returns values which can be used in the + conventional formulation and which include frame bias. + + 3) The three angles are returned in the conventional order, which + is not the same as the order of the corresponding Euler + rotations. The precession-bias matrix is + R_3(-z) x R_2(+theta) x R_3(-zeta). + + 4) Should zeta_A, z_A, theta_A angles be required that do not + contain frame bias, they are available by calling the ERFA + function eraP06e. + + Called: + eraPmat06 PB matrix, IAU 2006 + eraRz rotate around Z-axis + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
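+
+     Illustrative usage (not part of the ERFA documentation):
+
+     >>> bzeta, bz, btheta = pb06(2450123.7, 0.0)
+     >>> # each is an angle in radians; for array-valued dates the three
+     >>> # outputs take the broadcast shape of date1 and date2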
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + bzeta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bz_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + btheta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, bzeta_out, bz_out, btheta_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pb06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(bzeta_out.shape) > 0 and bzeta_out.shape[0] == 1 + bzeta_out = bzeta_out.reshape(bzeta_out.shape[1:]) + assert len(bz_out.shape) > 0 and bz_out.shape[0] == 1 + bz_out = bz_out.reshape(bz_out.shape[1:]) + assert len(btheta_out.shape) > 0 and btheta_out.shape[0] == 1 + btheta_out = btheta_out.reshape(btheta_out.shape[1:]) + + return bzeta_out, bz_out, btheta_out + + +def pfw06(date1, date2): + """ + Wrapper for ERFA function ``eraPfw06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + gamb : double array + phib : double array + psib : double array + epsa : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P f w 0 6 + - - - - - - - - - + + Precession angles, IAU 2006 (Fukushima-Williams 4-angle formulation). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psib double F-W angle psi_bar (radians) + epsa double F-W angle epsilon_A (radians) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole, + E = mean ecliptic pole of date, + and P = mean pole of date, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma_bar = epE + phib = phi_bar = pE + psib = psi_bar = pEP + epsa = epsilon_A = EP + + 3) The matrix representing the combined effects of frame bias and + precession is: + + PxB = R_1(-epsa).R_3(-psib).R_1(phib).R_3(gamb) + + 4) The matrix representing the combined effects of frame bias, + precession and nutation is simply: + + NxPxB = R_1(-epsa-dE).R_3(-psib-dP).R_1(phib).R_3(gamb) + + where dP and dE are the nutation components with respect to the + ecliptic of date. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Called: + eraObl06 mean obliquity, IAU 2006 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + gamb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phib_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psib_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, gamb_out, phib_out, psib_out, epsa_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pfw06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(gamb_out.shape) > 0 and gamb_out.shape[0] == 1 + gamb_out = gamb_out.reshape(gamb_out.shape[1:]) + assert len(phib_out.shape) > 0 and phib_out.shape[0] == 1 + phib_out = phib_out.reshape(phib_out.shape[1:]) + assert len(psib_out.shape) > 0 and psib_out.shape[0] == 1 + psib_out = psib_out.reshape(psib_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + + return gamb_out, phib_out, psib_out, epsa_out + + +def pmat00(date1, date2): + """ + Wrapper for ERFA function ``eraPmat00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 0 0 + - - - - - - - - - - + + Precession matrix (including frame bias) from GCRS to a specified + date, IAU 2000 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbp double[3][3] bias-precession matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbp * V(GCRS), where + the p-vector V(GCRS) is with respect to the Geocentric Celestial + Reference System (IAU, 2000) and the p-vector V(date) is with + respect to the mean equatorial triad of the given date. + + Called: + eraBp00 frame bias and precession matrices, IAU 2000 + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rbp_out + + +def pmat06(date1, date2): + """ + Wrapper for ERFA function ``eraPmat06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 0 6 + - - - - - - - - - - + + Precession matrix (including frame bias) from GCRS to a specified + date, IAU 2006 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbp double[3][3] bias-precession matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. 
The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbp * V(GCRS), where + the p-vector V(GCRS) is with respect to the Geocentric Celestial + Reference System (IAU, 2000) and the p-vector V(date) is with + respect to the mean equatorial triad of the given date. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rbp_out + + +def pmat76(date1, date2): + """ + Wrapper for ERFA function ``eraPmat76``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 7 6 + - - - - - - - - - - + + Precession matrix from J2000.0 to a specified date, IAU 1976 model. + + Given: + date1,date2 double ending date, TT (Note 1) + + Returned: + rmatp double[3][3] precession matrix, J2000.0 -> date1+date2 + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = RMATP * V(J2000), + where the p-vector V(J2000) is with respect to the mean + equatorial triad of epoch J2000.0 and the p-vector V(date) + is with respect to the mean equatorial triad of the given + date. + + 3) Though the matrix method itself is rigorous, the precession + angles are expressed through canonical polynomials which are + valid only for a limited time span. 
In addition, the IAU 1976 + precession rate is known to be imperfect. The absolute accuracy + of the present formulation is better than 0.1 arcsec from + 1960AD to 2040AD, better than 1 arcsec from 1640AD to 2360AD, + and remains below 3 arcsec for the whole of the period + 500BC to 3000AD. The errors exceed 10 arcsec outside the + range 1200BC to 3900AD, exceed 100 arcsec outside 4200BC to + 5600AD and exceed 1000 arcsec outside 6800BC to 8200AD. + + Called: + eraPrec76 accumulated precession angles, IAU 1976 + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + eraCr copy r-matrix + + References: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + equations (6) & (7), p283. + + Kaplan,G.H., 1981. USNO circular no. 163, pA2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat76(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatp_out.shape) > 0 and rmatp_out.shape[0] == 1 + rmatp_out = rmatp_out.reshape(rmatp_out.shape[1:]) + + return rmatp_out + + +def pn00(date1, date2, dpsi, deps): + """ + Wrapper for ERFA function ``eraPn00``. + + Parameters + ---------- + date1 : double array + date2 : double array + dpsi : double array + deps : double array + + Returns + ------- + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P n 0 0 + - - - - - - - - + + Precession-nutation, IAU 2000 model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Note 8) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. For high-accuracy + applications, free core nutation should be included as well as + any other relevant corrections to the position of the CIP. + + 3) The returned mean obliquity is consistent with the IAU 2000 + precession-nutation models. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox of + date to true equator and equinox of date by applying the nutation + (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraBp00 frame bias and precession matrices, IAU 2000 + eraCr copy r-matrix + eraNumat form nutation matrix + eraRxr product of two r-matrices + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
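+
+     Illustrative usage (not part of the ERFA documentation; the zero
+     nutation components below are placeholders, real values would come
+     from a nutation routine such as the IAU 1980 or 2000 series):
+
+     >>> epsa, rb, rp, rbp, rn, rbpn = pn00(2450123.7, 0.0, 0.0, 0.0)
+     >>> rbpn.shape                      # each matrix output is 3x3 per date
+     (3, 3)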
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dpsi_in, deps_in) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_in, deps_in, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn00a(date1, date2): + """ + Wrapper for ERFA function ``eraPn00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 0 a + - - - - - - - - - + + Precession-nutation, IAU 2000A model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000A) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. Free core nutation is omitted; + for the utmost accuracy, use the eraPn00 function, where the + nutation components are caller-specified. For faster but + slightly less accurate results, use the eraPn00b function. + + 3) The mean obliquity is consistent with the IAU 2000 precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2000A Celestial Intermediate + Pole are elements (3,1-3) of the GCRS-to-true matrix, + i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraNut00a nutation, IAU 2000A + eraPn00 bias/precession/nutation results, IAU 2000 + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
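+
+     Illustrative usage (not part of the ERFA documentation; a minimal
+     sketch showing the broadcast behaviour of the wrapper):
+
+     >>> dpsi, deps, epsa, rb, rp, rbp, rn, rbpn = pn00a([2450123.7, 2451545.0], 0.0)
+     >>> rbpn.shape                      # one 3x3 matrix per input date
+     (2, 3, 3)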
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn00b(date1, date2): + """ + Wrapper for ERFA function ``eraPn00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 0 b + - - - - - - - - - + + Precession-nutation, IAU 2000B model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000B) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. For more accurate results, but + at the cost of increased computation, use the eraPn00a function. + For the utmost accuracy, use the eraPn00 function, where the + nutation components are caller-specified. + + 3) The mean obliquity is consistent with the IAU 2000 precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2000B Celestial Intermediate + Pole are elements (3,1-3) of the GCRS-to-true matrix, + i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraNut00b nutation, IAU 2000B + eraPn00 bias/precession/nutation results, IAU 2000 + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003). + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
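+
+     Illustrative usage (not part of the ERFA documentation):
+
+     >>> out_b = pn00b(2450123.7, 0.0)   # truncated IAU 2000B nutation series
+     >>> out_a = pn00a(2450123.7, 0.0)   # full IAU 2000A series
+     >>> # both return (dpsi, deps, epsa, rb, rp, rbp, rn, rbpn); the B
+     >>> # variant is faster but slightly less accurate (of order 1 mas)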
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn06(date1, date2, dpsi, deps): + """ + Wrapper for ERFA function ``eraPn06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dpsi : double array + deps : double array + + Returns + ------- + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P n 0 6 + - - - - - - - - + + Precession-nutation, IAU 2006 model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based use + indirectly. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Note 8) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. For high-accuracy + applications, free core nutation should be included as well as + any other relevant corrections to the position of the CIP. + + 3) The returned mean obliquity is consistent with the IAU 2006 + precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the Celestial Intermediate Pole are + elements (3,1-3) of the GCRS-to-true matrix, i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + eraCr copy r-matrix + eraTr transpose r-matrix + eraRxr product of two r-matrices + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
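+
+     Illustrative usage (not part of the ERFA documentation; the nutation
+     components shown are placeholders only):
+
+     >>> epsa, rb, rp, rbp, rn, rbpn = pn06(2450123.7, 0.0, 0.0, 0.0)
+     >>> rn.shape
+     (3, 3)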
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dpsi_in, deps_in) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_in, deps_in, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn06a(date1, date2): + """ + Wrapper for ERFA function ``eraPn06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 6 a + - - - - - - - - - + + Precession-nutation, IAU 2006/2000A models: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based use + indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000A) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. Free core nutation is omitted; + for the utmost accuracy, use the eraPn06 function, where the + nutation components are caller-specified. + + 3) The mean obliquity is consistent with the IAU 2006 precession. + + 4) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 5) The matrix rp transforms vectors from mean J2000.0 to mean of + date by applying precession. + + 6) The matrix rbp transforms vectors from GCRS to mean of date by + applying frame bias then precession. It is the product rp x rb. + + 7) The matrix rn transforms vectors from mean of date to true of + date by applying the nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true of date + (CIP/equinox). It is the product rn x rbp, applying frame bias, + precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2006/2000A Celestial + Intermediate Pole are elements (3,1-3) of the GCRS-to-true + matrix, i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraNut06a nutation, IAU 2006/2000A + eraPn06 bias/precession/nutation results, IAU 2006 + + Reference: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
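+
+     Illustrative usage (not part of the ERFA documentation; a minimal sketch):
+
+     >>> dpsi, deps, epsa, rb, rp, rbp, rn, rbpn = pn06a(2450123.7, 0.0)
+     >>> # the bottom row of rbpn holds the X,Y,Z of the CIP (Note 9)
+     >>> rbpn.shape
+     (3, 3)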
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pnm00a(date1, date2): + """ + Wrapper for ERFA function ``eraPnm00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 0 a + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), equinox-based, IAU 2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbpn double[3][3] classical NPB matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbpn * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraPnm00b function. + + Called: + eraPn00a bias/precession/nutation, IAU 2000A + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return rbpn_out + + +def pnm00b(date1, date2): + """ + Wrapper for ERFA function ``eraPnm00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 0 b + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), equinox-based, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbpn double[3][3] bias-precession-nutation matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbpn * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraPnm00a function. + + Called: + eraPn00b bias/precession/nutation, IAU 2000B + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return rbpn_out + + +def pnm06a(date1, date2): + """ + Wrapper for ERFA function ``eraPnm06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rnpb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 6 a + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), IAU 2006 precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rnpb double[3][3] bias-precession-nutation matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) The matrix operates in the sense V(date) = rnpb * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraNut06a nutation, IAU 2006/2000A + eraFw2m F-W angles to r-matrix + + Reference: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rnpb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rnpb_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rnpb_out.shape) > 0 and rnpb_out.shape[0] == 1 + rnpb_out = rnpb_out.reshape(rnpb_out.shape[1:]) + + return rnpb_out + + +def pnm80(date1, date2): + """ + Wrapper for ERFA function ``eraPnm80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n m 8 0 + - - - - - - - - - + + Form the matrix of precession/nutation for a given date, IAU 1976 + precession model, IAU 1980 nutation model. + + Given: + date1,date2 double TDB date (Note 1) + + Returned: + rmatpn double[3][3] combined precession/nutation matrix + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rmatpn * V(J2000), + where the p-vector V(date) is with respect to the true equatorial + triad of date date1+date2 and the p-vector V(J2000) is with + respect to the mean equatorial triad of epoch J2000.0. + + Called: + eraPmat76 precession matrix, IAU 1976 + eraNutm80 nutation matrix, IAU 1980 + eraRxr product of two r-matrices + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.3 (p145). 
+ + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatpn_out.shape) > 0 and rmatpn_out.shape[0] == 1 + rmatpn_out = rmatpn_out.reshape(rmatpn_out.shape[1:]) + + return rmatpn_out + + +def pom00(xp, yp, sp): + """ + Wrapper for ERFA function ``eraPom00``. + + Parameters + ---------- + xp : double array + yp : double array + sp : double array + + Returns + ------- + rpom : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P o m 0 0 + - - - - - - - - - - + + Form the matrix of polar motion for a given date, IAU 2000. + + Given: + xp,yp double coordinates of the pole (radians, Note 1) + sp double the TIO locator s' (radians, Note 2) + + Returned: + rpom double[3][3] polar-motion matrix (Note 3) + + Notes: + + 1) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 2) The argument sp is the TIO locator s', in radians, which + positions the Terrestrial Intermediate Origin on the equator. It + is obtained from polar motion observations by numerical + integration, and so is in essence unpredictable. However, it is + dominated by a secular drift of about 47 microarcseconds per + century, and so can be taken into account by using s' = -47*t, + where t is centuries since J2000.0. The function eraSp00 + implements this approximation. + + 3) The matrix operates in the sense V(TRS) = rpom * V(CIP), meaning + that it is the final rotation when computing the pointing + direction to a celestial source. + + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + eraRx rotate around X-axis + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
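    Usage sketch (a wrapper-level aside, not part of the quoted ERFA documentation; the pole coordinates below are purely illustrative). The TIO locator s' is taken from the companion wrapper ``sp00`` defined later in this module:

        import numpy as np

        ARCSEC_TO_RAD = np.pi / (180.0 * 3600.0)
        xp = 0.035 * ARCSEC_TO_RAD      # illustrative pole x-coordinate, radians
        yp = 0.320 * ARCSEC_TO_RAD      # illustrative pole y-coordinate, radians
        sp = sp00(2400000.5, 58849.0)   # TIO locator s' for the same (illustrative) TT date
        rpom = pom00(xp, yp, sp)        # polar-motion matrix, V(TRS) = rpom * V(CIP)
        assert rpom.shape == (3, 3)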
+ + """ + + #Turn all inputs into arrays + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), xp_in, yp_in, sp_in) + rpom_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [xp_in, yp_in, sp_in, rpom_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pom00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpom_out.shape) > 0 and rpom_out.shape[0] == 1 + rpom_out = rpom_out.reshape(rpom_out.shape[1:]) + + return rpom_out + + +def pr00(date1, date2): + """ + Wrapper for ERFA function ``eraPr00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsipr : double array + depspr : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P r 0 0 + - - - - - - - - + + Precession-rate part of the IAU 2000 precession-nutation models + (part of MHB2000). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsipr,depspr double precession corrections (Notes 2,3) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The precession adjustments are expressed as "nutation + components", corrections in longitude and obliquity with respect + to the J2000.0 equinox and ecliptic. + + 3) Although the precession adjustments are stated to be with respect + to Lieske et al. (1977), the MHB2000 model does not specify which + set of Euler angles are to be used and how the adjustments are to + be applied. The most literal and straightforward procedure is to + adopt the 4-rotation epsilon_0, psi_A, omega_A, xi_A option, and + to add dpsipr to psi_A and depspr to both omega_A and eps_A. + + 4) This is an implementation of one aspect of the IAU 2000A nutation + model, formally adopted by the IAU General Assembly in 2000, + namely MHB2000 (Mathews et al. 2002). + + References: + + Lieske, J.H., Lederle, T., Fricke, W. 
& Morando, B., "Expressions + for the precession quantities based upon the IAU (1976) System of + Astronomical Constants", Astron.Astrophys., 58, 1-16 (1977) + + Mathews, P.M., Herring, T.A., Buffet, B.A., "Modeling of nutation + and precession New nutation series for nonrigid Earth and + insights into the Earth's interior", J.Geophys.Res., 107, B4, + 2002. The MHB2000 code itself was obtained on 9th September 2002 + from ftp://maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsipr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + depspr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsipr_out, depspr_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pr00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsipr_out.shape) > 0 and dpsipr_out.shape[0] == 1 + dpsipr_out = dpsipr_out.reshape(dpsipr_out.shape[1:]) + assert len(depspr_out.shape) > 0 and depspr_out.shape[0] == 1 + depspr_out = depspr_out.reshape(depspr_out.shape[1:]) + + return dpsipr_out, depspr_out + + +def prec76(date01, date02, date11, date12): + """ + Wrapper for ERFA function ``eraPrec76``. + + Parameters + ---------- + date01 : double array + date02 : double array + date11 : double array + date12 : double array + + Returns + ------- + zeta : double array + z : double array + theta : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P r e c 7 6 + - - - - - - - - - - + + IAU 1976 precession model. + + This function forms the three Euler angles which implement general + precession between two dates, using the IAU 1976 model (as for the + FK5 catalog). + + Given: + date01,date02 double TDB starting date (Note 1) + date11,date12 double TDB ending date (Note 1) + + Returned: + zeta double 1st rotation: radians cw around z + z double 3rd rotation: radians cw around z + theta double 2nd rotation: radians ccw around y + + Notes: + + 1) The dates date01+date02 and date11+date12 are Julian Dates, + apportioned in any convenient way between the arguments daten1 + and daten2. For example, JD(TDB)=2450123.7 could be expressed in + any of these ways, among others: + + daten1 daten2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. 
The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + The two dates may be expressed using different methods, but at + the risk of losing some resolution. + + 2) The accumulated precession angles zeta, z, theta are expressed + through canonical polynomials which are valid only for a limited + time span. In addition, the IAU 1976 precession rate is known to + be imperfect. The absolute accuracy of the present formulation + is better than 0.1 arcsec from 1960AD to 2040AD, better than + 1 arcsec from 1640AD to 2360AD, and remains below 3 arcsec for + the whole of the period 500BC to 3000AD. The errors exceed + 10 arcsec outside the range 1200BC to 3900AD, exceed 100 arcsec + outside 4200BC to 5600AD and exceed 1000 arcsec outside 6800BC to + 8200AD. + + 3) The three angles are returned in the conventional order, which + is not the same as the order of the corresponding Euler + rotations. The precession matrix is + R_3(-z) x R_2(+theta) x R_3(-zeta). + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282, equations + (6) & (7), p283. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date01_in = numpy.array(date01, dtype=numpy.double, order="C", copy=False, subok=True) + date02_in = numpy.array(date02, dtype=numpy.double, order="C", copy=False, subok=True) + date11_in = numpy.array(date11, dtype=numpy.double, order="C", copy=False, subok=True) + date12_in = numpy.array(date12, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date01_in, date02_in, date11_in, date12_in) + zeta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + z_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date01_in, date02_in, date11_in, date12_in, zeta_out, z_out, theta_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._prec76(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(zeta_out.shape) > 0 and zeta_out.shape[0] == 1 + zeta_out = zeta_out.reshape(zeta_out.shape[1:]) + assert len(z_out.shape) > 0 and z_out.shape[0] == 1 + z_out = z_out.reshape(z_out.shape[1:]) + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + + return zeta_out, z_out, theta_out + + +def s00(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraS00``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below.
+ + - - - - - - - + e r a S 0 0 + - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, given the CIP's X,Y + coordinates. Compatible with IAU 2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double CIP coordinates (Note 3) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems: the two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The quantity s remains below 0.1 arcsecond + throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. This + function requires X,Y to be supplied by the caller, who is + responsible for providing values that are consistent with the + supplied date. + + 4) The model is consistent with the IAU 2000A precession-nutation. + + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
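    Usage sketch (a wrapper-level aside, not part of the quoted ERFA documentation; the date is illustrative). Per Note 3 the caller supplies X,Y consistent with the date; one convenient source is the companion wrapper ``xys00a`` defined later in this module, which obtains s through this same series:

        import numpy as np

        d1, d2 = 2400000.5, 53736.0     # illustrative TT date, MJD split
        x, y, s = xys00a(d1, d2)        # CIP X,Y and the CIO locator s
        assert np.allclose(s00(d1, d2, x, y), s)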
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_in, y_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s00a(date1, date2): + """ + Wrapper for ERFA function ``eraS00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 0 a + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2000A + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. The present + function uses the full IAU 2000A nutation model when predicting + the CIP position. Faster results, with no significant loss of + accuracy, can be obtained via the function eraS00b, which uses + instead the IAU 2000B truncated model. 
+ + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraBpn2xy extract CIP X,Y from the BPN matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s00b(date1, date2): + """ + Wrapper for ERFA function ``eraS00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 0 b + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2000B + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator.
The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. The present + function uses the IAU 2000B truncated nutation model when + predicting the CIP position. The function eraS00a uses instead + the full IAU 2000A model, but with no significant increase in + accuracy and at some cost in speed. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraBpn2xy extract CIP X,Y from the BPN matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s06(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraS06``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 0 6 + - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, given the CIP's X,Y + coordinates. Compatible with IAU 2006/2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double CIP coordinates (Note 3) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments.
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems: the two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The quantity s remains below 0.1 arcsecond + throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. This + function requires X,Y to be supplied by the caller, who is + responsible for providing values that are consistent with the + supplied date. + + 4) The model is consistent with the "P03" precession (Capitaine et + al. 2003), adopted by IAU 2006 Resolution 1, 2006, and the + IAU 2000A nutation (with P03 adjustments). + + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2003, Astron. + Astrophys. 432, 355 + + McCarthy, D.D., Petit, G. (eds.) 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_in, y_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s06a(date1, date2): + """ + Wrapper for ERFA function ``eraS06a``. 
+ + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 6 a + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2006 + precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series is + more compact than a direct series for s would be. The present + function uses the full IAU 2000A nutation model when predicting + the CIP position. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
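    Usage sketch (a wrapper-level aside, not part of the quoted ERFA documentation; the date is illustrative). Note 2 keeps s to a small fraction of an arcsecond throughout 1900-2100, which the snippet checks loosely:

        import numpy as np

        ARCSEC_TO_RAD = np.pi / (180.0 * 3600.0)
        s = s06a(2400000.5, 58849.0)      # illustrative TT date within 1900-2100
        assert abs(s) < ARCSEC_TO_RAD     # comfortably below one arcsecond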
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def sp00(date1, date2): + """ + Wrapper for ERFA function ``eraSp00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S p 0 0 + - - - - - - - - + + The TIO locator s', positioning the Terrestrial Intermediate Origin + on the equator of the Celestial Intermediate Pole. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the TIO locator s' in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The TIO locator s' is obtained from polar motion observations by + numerical integration, and so is in essence unpredictable. + However, it is dominated by a secular drift of about + 47 microarcseconds per century, which is the approximation + evaluated by the present function. + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._sp00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def xy06(date1, date2): + """ + Wrapper for ERFA function ``eraXy06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a X y 0 6 + - - - - - - - - + + X,Y coordinates of celestial intermediate pole from series based + on IAU 2006 precession and IAU 2000A nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double CIP X,Y coordinates (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The X,Y coordinates are those of the unit vector towards the + celestial intermediate pole. They represent the combined effects + of frame bias, precession and nutation. + + 3) The fundamental arguments used are as adopted in IERS Conventions + (2003) and are from Simon et al. (1994) and Souchay et al. + (1999). + + 4) This is an alternative to the angles-based method, via the ERFA + function eraFw2xy and as used in eraXys06a for example. The two + methods agree at the 1 microarcsecond level (at present), a + negligible amount compared with the intrinsic accuracy of the + models. However, it would be unwise to mix the two methods + (angles-based and series-based) in a single application. 
+ + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFame03 mean longitude of Mercury + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFama03 mean longitude of Mars + eraFaju03 mean longitude of Jupiter + eraFasa03 mean longitude of Saturn + eraFaur03 mean longitude of Uranus + eraFane03 mean longitude of Neptune + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2003, + Astron.Astrophys., 412, 567 + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G. & Laskar, J., Astron.Astrophys., 1994, 282, 663 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M., 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xy06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def xys00a(date1, date2): + """ + Wrapper for ERFA function ``eraXys00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a X y s 0 0 a + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2000A + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) A faster, but slightly less accurate result (about 1 mas for + X,Y), can be obtained by using instead the eraXys00b function. + + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def xys00b(date1, date2): + """ + Wrapper for ERFA function ``eraXys00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a X y s 0 0 b + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2000B + precession-nutation model. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) The present function is faster, but slightly less accurate (about + 1 mas in X,Y), than the eraXys00a function. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def xys06a(date1, date2): + """ + Wrapper for ERFA function ``eraXys06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a X y s 0 6 a + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2006 + precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y components + of the unit vector in the Geocentric Celestial Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) Series-based solutions for generating X and Y are also available: + see Capitaine & Wallace (2006) and eraXy06. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def ee00(date1, date2, epsa, dpsi): + """ + Wrapper for ERFA function ``eraEe00``. 
+ + Parameters + ---------- + date1 : double array + date2 : double array + epsa : double array + dpsi : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a E e 0 0 + - - - - - - - - + + The equation of the equinoxes, compatible with IAU 2000 resolutions, + given the nutation in longitude and the mean obliquity. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + epsa double mean obliquity (Note 2) + dpsi double nutation in longitude (Note 3) + + Returned (function value): + double equation of the equinoxes (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The obliquity, in radians, is mean of date. + + 3) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 4) The result is compatible with the IAU 2000 resolutions. For + further details, see IERS Conventions 2003 and Capitaine et al. + (2002). + + Called: + eraEect00 equation of the equinoxes complementary terms + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
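+
+    An illustrative sketch, added to this wrapper's docstring and not part of
+    the ERFA documentation above. The numerical values are placeholders and
+    the names refer to the wrappers defined in this module:
+
+        # The mean obliquity and the nutation in longitude would normally be
+        # obtained from companion routines (see "Called" above); plausible
+        # magnitudes in radians are used here purely for illustration.
+        epsa = 0.40909      # mean obliquity of date (radians)
+        dpsi = -6.8e-5      # nutation in longitude (radians)
+        ee = ee00(2451545.0, 0.0, epsa, dpsi)
+        # ee is in radians and enters the sidereal-time chain as
+        # Greenwich apparent ST = GMST + ee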
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + epsa_in = numpy.array(epsa, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, epsa_in, dpsi_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, epsa_in, dpsi_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee00a(date1, date2): + """ + Wrapper for ERFA function ``eraEe00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 0 a + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 3) The result is compatible with the IAU 2000 resolutions. For + further details, see IERS Conventions 2003 and Capitaine et al. + (2002). + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraNut00a nutation, IAU 2000A + eraEe00 equation of the equinoxes, IAU 2000 + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003). + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
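+
+    A usage sketch, added to this wrapper's docstring and not part of the
+    ERFA documentation above. It shows the broadcasting behaviour that the
+    wrapper code below implements with numpy.broadcast and nditer:
+
+        import numpy as np
+        # a scalar first part and an array second part broadcast together,
+        # so one call evaluates the equation of the equinoxes at four epochs
+        ee = ee00a(2451545.0, np.array([0.0, 100.0, 200.0, 300.0]))
+        # ee has the broadcast shape (4,) and is in radians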
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee00b(date1, date2): + """ + Wrapper for ERFA function ``eraEe00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 0 b + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions but + using the truncated nutation model IAU 2000B. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 3) The result is compatible with the IAU 2000 resolutions except + that accuracy has been compromised for the sake of speed. For + further details, see McCarthy & Luzum (2001), IERS Conventions + 2003 and Capitaine et al. (2003). + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraNut00b nutation, IAU 2000B + eraEe00 equation of the equinoxes, IAU 2000 + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D.D. & Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Celestial Mechanics & + Dynamical Astronomy, 85, 37-49 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. 
See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee06a(date1, date2): + """ + Wrapper for ERFA function ``eraEe06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 6 a + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions and + IAU 2006/2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + Called: + eraAnpm normalize angle into range +/- pi + eraGst06a Greenwich apparent sidereal time, IAU 2006/2000A + eraGmst06 Greenwich mean sidereal time, IAU 2006 + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
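+
+    A consistency sketch, added to this wrapper's docstring and not part of
+    the ERFA documentation above. Per the "Called" list, ee06a is the
+    (normalized) difference between apparent and mean sidereal time; gst06a
+    and gmst06 are the corresponding wrappers defined later in this module,
+    and the UT1/TT values are placeholders:
+
+        ut1a, ut1b = 2453750.5, 0.892855    # UT1
+        tta, ttb = 2453750.5, 0.893619      # TT (UT1 plus a delta-T-like offset)
+        ee_direct = ee06a(tta, ttb)
+        ee_from_st = gst06a(ut1a, ut1b, tta, ttb) - gmst06(ut1a, ut1b, tta, ttb)
+        # up to normalization into +/- pi, the two values should agree closely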
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eect00(date1, date2): + """ + Wrapper for ERFA function ``eraEect00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E e c t 0 0 + - - - - - - - - - - + + Equation of the equinoxes complementary terms, consistent with + IAU 2000 resolutions. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double complementary terms (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The "complementary terms" are part of the equation of the + equinoxes (EE), classically the difference between apparent and + mean Sidereal Time: + + GAST = GMST + EE + + with: + + EE = dpsi * cos(eps) + + where dpsi is the nutation in longitude and eps is the obliquity + of date. However, if the rotation of the Earth were constant in + an inertial frame the classical formulation would lead to + apparent irregularities in the UT1 timescale traceable to side- + effects of precession-nutation. In order to eliminate these + effects from UT1, "complementary terms" were introduced in 1994 + (IAU, 1994) and took effect from 1997 (Capitaine and Gontier, + 1993): + + GAST = GMST + CT + EE + + By convention, the complementary terms are included as part of + the equation of the equinoxes rather than as part of the mean + Sidereal Time. This slightly compromises the "geometrical" + interpretation of mean sidereal time but is otherwise + inconsequential. + + The present function computes CT in the above expression, + compatible with IAU 2000 resolutions (Capitaine et al., 2002, and + IERS Conventions 2003). 
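+
+       (Illustrative addition to this wrapper's docstring, not part of the
+       original ERFA note: the complementary terms are small, of order a few
+       milliarcseconds at most, and can be evaluated directly, e.g.
+
+           ct = eect00(2451545.0, 0.0)   # complementary terms, in radians
+
+       with ct entering the chain above as GAST = GMST + CT + EE.)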
+ + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N. & Gontier, A.-M., Astron. Astrophys., 275, + 645-650 (1993) + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + IAU Resolution C7, Recommendation 3 (1994) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eect00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eqeq94(date1, date2): + """ + Wrapper for ERFA function ``eraEqeq94``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E q e q 9 4 + - - - - - - - - - - + + Equation of the equinoxes, IAU 1994 model. + + Given: + date1,date2 double TDB date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
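+
+       (Worked illustration added to this wrapper's docstring, not part of
+       the original ERFA note: every row of the table above sums to the same
+       Julian Date,
+
+           2451545.0 + (-1421.3) == 2400000.5 + 50123.2 == 2450123.5 + 0.2
+                                 == 2450123.7
+
+       so the choice of split affects only the floating-point resolution of
+       the result, not the epoch being described.)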
+ + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + Called: + eraAnpm normalize angle into range +/- pi + eraNut80 nutation, IAU 1980 + eraObl80 mean obliquity, IAU 1980 + + References: + + IAU Resolution C7, Recommendation 3 (1994). + + Capitaine, N. & Gontier, A.-M., 1993, Astron. Astrophys., 275, + 645-650. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eqeq94(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def era00(dj1, dj2): + """ + Wrapper for ERFA function ``eraEra00``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E r a 0 0 + - - - - - - - - - + + Earth rotation angle (IAU 2000 model). + + Given: + dj1,dj2 double UT1 as a 2-part Julian Date (see note) + + Returned (function value): + double Earth rotation angle (radians), range 0-2pi + + Notes: + + 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any + convenient way between the arguments dj1 and dj2. For example, + JD(UT1)=2450123.7 could be expressed in any of these ways, + among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum precision is + delivered when the dj1 argument is for 0hrs UT1 on the day in + question and the dj2 argument lies in the range 0 to 1, or vice + versa. + + 2) The algorithm is adapted from Expression 22 of Capitaine et al. + 2000. The time argument has been expressed in days directly, + and, to retain precision, integer contributions have been + eliminated. The same formulation is given in IERS Conventions + (2003), Chap. 5, Eq. 14. + + Called: + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine N., Guinot B. and McCarthy D.D, 2000, Astron. + Astrophys., 355, 398-405. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 
32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._era00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst00(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGmst00``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 0 0 + - - - - - - - - - - + + Greenwich mean sidereal time (model consistent with IAU 2000 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession. If UT1 is used for + both purposes, errors of order 100 microarcseconds result. + + 3) This GMST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation and equation of the + equinoxes. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. 
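+
+       (Usage sketch added to this wrapper's docstring, not part of the
+       original ERFA notes; the UT1/TT values are placeholders and their
+       offset stands in for delta-T:
+
+           gmst = gmst00(2453750.5, 0.892, 2453750.5, 0.8928)
+
+       Per note 2, reusing the UT1 pair for the TT arguments is acceptable
+       only when errors of order 100 microarcseconds can be tolerated.)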
+ + Called: + eraEra00 Earth rotation angle, IAU 2000 + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst06(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGmst06``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 0 6 + - - - - - - - - - - + + Greenwich mean sidereal time (consistent with IAU 2006 precession). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. 
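+
+       (Illustrative addition to this wrapper's docstring, not part of the
+       original ERFA note: the "date & time" split described above looks like
+
+           uta, utb = 2450123.5, 0.2       # 0h UT1 on the day, plus day fraction
+           tta, ttb = 2450123.5, 0.2008    # corresponding TT (placeholder offset)
+           gmst = gmst06(uta, utb, tta, ttb)
+
+       which is the apportionment the internally called Earth rotation angle
+       routine handles with maximum precision.)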
+ + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession. If UT1 is used for + both purposes, errors of order 100 microarcseconds result. + + 3) This GMST is compatible with the IAU 2006 precession and must not + be used with other precession models. + + 4) The result is returned in the range 0 to 2pi. + + Called: + eraEra00 Earth rotation angle, IAU 2000 + eraAnp normalize angle into range 0 to 2pi + + Reference: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2005, + Astron.Astrophys. 432, 355 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst82(dj1, dj2): + """ + Wrapper for ERFA function ``eraGmst82``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 8 2 + - - - - - - - - - - + + Universal Time to Greenwich mean sidereal time (IAU 1982 model). + + Given: + dj1,dj2 double UT1 Julian Date (see note) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any + convenient way between the arguments dj1 and dj2. For example, + JD(UT1)=2450123.7 could be expressed in any of these ways, + among others: + + dj1 dj2 + + 2450123.7 0 (JD method) + 2451545 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum accuracy (or, at + least, minimum noise) is delivered when the dj1 argument is for + 0hrs UT1 on the day in question and the dj2 argument lies in the + range 0 to 1, or vice versa. + + 2) The algorithm is based on the IAU 1982 expression. This is + always described as giving the GMST at 0 hours UT1. 
In fact, it + gives the difference between the GMST and the UT, the steady + 4-minutes-per-day drawing-ahead of ST with respect to UT. When + whole days are ignored, the expression happens to equal the GMST + at 0 hours UT1 each day. + + 3) In this function, the entire UT1 (the sum of the two arguments + dj1 and dj2) is used directly as the argument for the standard + formula, the constant term of which is adjusted by 12 hours to + take account of the noon phasing of Julian Date. The UT1 is then + added, but omitting whole days to conserve accuracy. + + Called: + eraAnp normalize angle into range 0 to 2pi + + References: + + Transactions of the International Astronomical Union, + XVIII B, 67 (1983). + + Aoki et al., Astron. Astrophys. 105, 359-361 (1982). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst82(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst00a(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGst00a``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 0 a + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. 
For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) This GAST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. + + Called: + eraGmst00 Greenwich mean sidereal time, IAU 2000 + eraEe00a equation of the equinoxes, IAU 2000A + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst00b(uta, utb): + """ + Wrapper for ERFA function ``eraGst00b``. + + Parameters + ---------- + uta : double array + utb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 0 b + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 + resolutions but using the truncated nutation model IAU 2000B). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 date uta+utb is a Julian Date, apportioned in any + convenient way between the argument pair. 
For example, + JD=2450123.7 could be expressed in any of these ways, among + others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) The result is compatible with the IAU 2000 resolutions, except + that accuracy has been compromised for the sake of speed and + convenience in two respects: + + . UT is used instead of TDB (or TT) to compute the precession + component of GMST and the equation of the equinoxes. This + results in errors of order 0.1 mas at present. + + . The IAU 2000B abridged nutation model (McCarthy & Luzum, 2001) + is used, introducing errors of up to 1 mas. + + 3) This GAST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. + + Called: + eraGmst00 Greenwich mean sidereal time, IAU 2000 + eraEe00b equation of the equinoxes, IAU 2000B + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D.D. & Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Celestial Mechanics & + Dynamical Astronomy, 85, 37-49 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst06(uta, utb, tta, ttb, rnpb): + """ + Wrapper for ERFA function ``eraGst06``. 
+ + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + rnpb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G s t 0 6 + - - - - - - - - - + + Greenwich apparent sidereal time, IAU 2006, given the NPB matrix. + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + rnpb double[3][3] nutation x precession x bias matrix + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) Although the function uses the IAU 2006 series for s+XY/2, it is + otherwise independent of the precession-nutation model and can in + practice be used with any equinox-based NPB matrix. + + 4) The result is returned in the range 0 to 2pi. + + Called: + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraAnp normalize angle into range 0 to 2pi + eraEra00 Earth rotation angle, IAU 2000 + eraEors equation of the origins, given NPB matrix and s + + Reference: + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
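+
+    A usage sketch, added to this wrapper's docstring and not part of the
+    ERFA documentation above. It assumes that the pnm06a wrapper (the
+    classical NPB matrix, mentioned under "Called" in the eraGst06a notes) is
+    also available in this module; the date values are placeholders:
+
+        uta, utb = 2453750.5, 0.892855   # UT1
+        tta, ttb = 2453750.5, 0.893619   # TT
+        rnpb = pnm06a(tta, ttb)          # 3x3 bias-precession-nutation matrix
+        gast = gst06(uta, utb, tta, ttb, rnpb)
+        # rnpb may carry extra leading (broadcast) dimensions, but its
+        # trailing shape must be (3, 3), as checked in the wrapper body below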
+ + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + rnpb_in = numpy.array(rnpb, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rnpb_in, (3, 3), "rnpb") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in, rnpb_in[...,0,0]) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, rnpb_in[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst06a(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGst06a``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 6 a + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 and 2006 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) This GAST is compatible with the IAU 2000/2006 resolutions and + must be used only in conjunction with IAU 2006 precession and + IAU 2000A nutation. 
+ + 4) The result is returned in the range 0 to 2pi. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraGst06 Greenwich apparent ST, IAU 2006, given NPB matrix + + Reference: + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst94(uta, utb): + """ + Wrapper for ERFA function ``eraGst94``. + + Parameters + ---------- + uta : double array + utb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G s t 9 4 + - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 1982/94 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 date uta+utb is a Julian Date, apportioned in any + convenient way between the argument pair. For example, + JD=2450123.7 could be expressed in any of these ways, among + others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) The result is compatible with the IAU 1982 and 1994 resolutions, + except that accuracy has been compromised for the sake of + convenience in that UT is used instead of TDB (or TT) to compute + the equation of the equinoxes. + + 3) This GAST must be used only in conjunction with contemporaneous + IAU standards such as 1976 precession, 1980 obliquity and 1982 + nutation. It is not compatible with the IAU 2000 resolutions. 
+ + 4) The result is returned in the range 0 to 2pi. + + Called: + eraGmst82 Greenwich mean sidereal time, IAU 1982 + eraEqeq94 equation of the equinoxes, IAU 1994 + eraAnp normalize angle into range 0 to 2pi + + References: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + IAU Resolution C7, Recommendation 3 (1994) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst94(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def pvstar(pv): + """ + Wrapper for ERFA function ``eraPvstar``. + + Parameters + ---------- + pv : double array + + Returns + ------- + ra : double array + dec : double array + pmr : double array + pmd : double array + px : double array + rv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P v s t a r + - - - - - - - - - - + + Convert star position+velocity vector to catalog coordinates. + + Given (Note 1): + pv double[2][3] pv-vector (au, au/day) + + Returned (Note 2): + ra double right ascension (radians) + dec double declination (radians) + pmr double RA proper motion (radians/year) + pmd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, positive = receding) + + Returned (function value): + int status: + 0 = OK + -1 = superluminal speed (Note 5) + -2 = null position vector + + Notes: + + 1) The specified pv-vector is the coordinate direction (and its rate + of change) for the date at which the light leaving the star + reached the solar-system barycenter. + + 2) The star data returned by this function are "observables" for an + imaginary observer at the solar-system barycenter. Proper motion + and radial velocity are, strictly, in terms of barycentric + coordinate time, TCB. For most practical applications, it is + permissible to neglect the distinction between TCB and ordinary + "proper" time on Earth (TT/TAI). The result will, as a rule, be + limited by the intrinsic accuracy of the proper-motion and + radial-velocity data; moreover, the supplied pv-vector is likely + to be merely an intermediate result (for example generated by the + function eraStarpv), so that a change of time unit will cancel + out overall. + + In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. 
The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + Summarizing, the specified pv-vector is for most stars almost + identical to the result of applying the standard geometrical + "space motion" transformation to the catalog data. The + differences, which are the subject of the Stumpff paper cited + below, are: + + (i) In stars with significant radial velocity and proper motion, + the constantly changing light-time distorts the apparent proper + motion. Note that this is a classical, not a relativistic, + effect. + + (ii) The transformation complies with special relativity. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds; the radial velocity is in km/s, but + the pv-vector result is in au and au/day. + + 4) The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per Julian + year. The RA proper motion is in terms of coordinate angle, not + true angle, and will thus be numerically larger at high + declinations. + + 5) Straight-line motion at constant speed in the inertial frame is + assumed. If the speed is greater than or equal to the speed of + light, the function aborts with an error status. + + 6) The inverse transformation is performed by the function eraStarpv. + + Called: + eraPn decompose p-vector into modulus and direction + eraPdp scalar product of two p-vectors + eraSxp multiply p-vector by scalar + eraPmp p-vector minus p-vector + eraPm modulus of p-vector + eraPpp p-vector plus p-vector + eraPv2s pv-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + Stumpff, P., 1985, Astron.Astrophys. 144, 232-240. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
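+
+ Illustrative usage (the pv-vector below is an arbitrary value chosen
+ for this sketch, roughly 1e5 au from the SSB and moving at about
+ 17 km/s; it is not taken from the ERFA documentation):
+
+ >>> import numpy as np
+ >>> pv = np.array([[1.0e5, 0.0, 0.0],    # position (au)
+ ...                [0.0, 1.0e-2, 0.0]])  # velocity (au/day)
+ >>> ra, dec, pmr, pmd, px, rv = pvstar(pv)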
+ + """ + + #Turn all inputs into arrays + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pv_in[...,0,0]) + ra_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pv_in[...,0,0], ra_out, dec_out, pmr_out, pmd_out, px_out, rv_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pvstar(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'pvstar') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra_out.shape) > 0 and ra_out.shape[0] == 1 + ra_out = ra_out.reshape(ra_out.shape[1:]) + assert len(dec_out.shape) > 0 and dec_out.shape[0] == 1 + dec_out = dec_out.reshape(dec_out.shape[1:]) + assert len(pmr_out.shape) > 0 and pmr_out.shape[0] == 1 + pmr_out = pmr_out.reshape(pmr_out.shape[1:]) + assert len(pmd_out.shape) > 0 and pmd_out.shape[0] == 1 + pmd_out = pmd_out.reshape(pmd_out.shape[1:]) + assert len(px_out.shape) > 0 and px_out.shape[0] == 1 + px_out = px_out.reshape(px_out.shape[1:]) + assert len(rv_out.shape) > 0 and rv_out.shape[0] == 1 + rv_out = rv_out.reshape(rv_out.shape[1:]) + + return ra_out, dec_out, pmr_out, pmd_out, px_out, rv_out +STATUS_CODES['pvstar'] = {0: 'OK', -1: 'superluminal speed (Note 5)', -2: 'null position vector'} + + + +def starpv(ra, dec, pmr, pmd, px, rv): + """ + Wrapper for ERFA function ``eraStarpv``. + + Parameters + ---------- + ra : double array + dec : double array + pmr : double array + pmd : double array + px : double array + rv : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a S t a r p v + - - - - - - - - - - + + Convert star catalog coordinates to position+velocity vector. + + Given (Note 1): + ra double right ascension (radians) + dec double declination (radians) + pmr double RA proper motion (radians/year) + pmd double Dec proper motion (radians/year) + px double parallax (arcseconds) + rv double radial velocity (km/s, positive = receding) + + Returned (Note 2): + pv double[2][3] pv-vector (au, au/day) + + Returned (function value): + int status: + 0 = no warnings + 1 = distance overridden (Note 6) + 2 = excessive speed (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above + + Notes: + + 1) The star data accepted by this function are "observables" for an + imaginary observer at the solar-system barycenter. Proper motion + and radial velocity are, strictly, in terms of barycentric + coordinate time, TCB. 
For most practical applications, it is + permissible to neglect the distinction between TCB and ordinary + "proper" time on Earth (TT/TAI). The result will, as a rule, be + limited by the intrinsic accuracy of the proper-motion and + radial-velocity data; moreover, the pv-vector is likely to be + merely an intermediate result, so that a change of time unit + would cancel out overall. + + In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + 2) The resulting position and velocity pv-vector is with respect to + the same frame and, like the catalog coordinates, is freed from + the effects of secular aberration. Should the "coordinate + direction", where the object was located at the catalog epoch, be + required, it may be obtained by calculating the magnitude of the + position vector pv[0][0-2] dividing by the speed of light in + au/day to give the light-time, and then multiplying the space + velocity pv[1][0-2] by this light-time and adding the result to + pv[0][0-2]. + + Summarizing, the pv-vector returned is for most stars almost + identical to the result of applying the standard geometrical + "space motion" transformation. The differences, which are the + subject of the Stumpff paper referenced below, are: + + (i) In stars with significant radial velocity and proper motion, + the constantly changing light-time distorts the apparent proper + motion. Note that this is a classical, not a relativistic, + effect. + + (ii) The transformation complies with special relativity. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds; the radial velocity is in km/s, but + the pv-vector result is in au and au/day. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, + is assumed. + + 6) An extremely small (or zero or negative) parallax is interpreted + to mean that the object is on the "celestial sphere", the radius + of which is an arbitrary (large) value (see the constant PXMIN). + When the distance is overridden in this way, the status, + initially zero, has 1 added to it. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX), it is arbitrarily set to zero. When this action + occurs, 2 is added to the status. + + 8) The relativistic adjustment involves an iterative calculation. + If the process fails to converge within a set number (IMAX) of + iterations, 4 is added to the status. + + 9) The inverse transformation is performed by the function + eraPvstar. + + Called: + eraS2pv spherical coordinates to pv-vector + eraPm modulus of p-vector + eraZp zero p-vector + eraPn decompose p-vector into modulus and direction + eraPdp scalar product of two p-vectors + eraSxp multiply p-vector by scalar + eraPmp p-vector minus p-vector + eraPpp p-vector plus p-vector + + Reference: + + Stumpff, P., 1985, Astron.Astrophys. 144, 232-240. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
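+
+ Illustrative usage (arbitrary catalog values; the constant 173.1446
+ au/day standing in for the speed of light in the Note 2 computation
+ is an approximation supplied for this sketch, not a value exported
+ by this module):
+
+ >>> import numpy as np
+ >>> pv = starpv(4.70, 0.08, -4.0e-6, 5.0e-5, 0.55, -110.0)
+ >>> # coordinate direction at the catalog epoch, following Note 2
+ >>> light_time = np.linalg.norm(pv[0]) / 173.1446   # light-time in days
+ >>> coord_dir = pv[0] + light_time * pv[1]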
+ + """ + + #Turn all inputs into arrays + ra_in = numpy.array(ra, dtype=numpy.double, order="C", copy=False, subok=True) + dec_in = numpy.array(dec, dtype=numpy.double, order="C", copy=False, subok=True) + pmr_in = numpy.array(pmr, dtype=numpy.double, order="C", copy=False, subok=True) + pmd_in = numpy.array(pmd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra_in, dec_in, pmr_in, pmd_in, px_in, rv_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra_in, dec_in, pmr_in, pmd_in, px_in, rv_in, pv_out[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._starpv(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'starpv') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out +STATUS_CODES['starpv'] = {0: 'no warnings', 1: 'distance overridden (Note 6)', 2: 'excessive speed (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 'binary logical OR of the above'} + + + +def fk52h(r5, d5, dr5, dd5, px5, rv5): + """ + Wrapper for ERFA function ``eraFk52h``. + + Parameters + ---------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + px5 : double array + rv5 : double array + + Returns + ------- + rh : double array + dh : double array + drh : double array + ddh : double array + pxh : double array + rvh : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F k 5 2 h + - - - - - - - - - + + Transform FK5 (J2000.0) star data into the Hipparcos system. + + Given (all FK5, equinox J2000.0, epoch J2000.0): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double proper motion in RA (dRA/dt, rad/Jyear) + dd5 double proper motion in Dec (dDec/dt, rad/Jyear) + px5 double parallax (arcsec) + rv5 double radial velocity (km/s, positive = receding) + + Returned (all Hipparcos, epoch J2000.0): + rh double RA (radians) + dh double Dec (radians) + drh double proper motion in RA (dRA/dt, rad/Jyear) + ddh double proper motion in Dec (dDec/dt, rad/Jyear) + pxh double parallax (arcsec) + rvh double radial velocity (km/s, positive = receding) + + Notes: + + 1) This function transforms FK5 star positions and proper motions + into the system of the Hipparcos catalog. + + 2) The proper motions in RA are dRA/dt rather than + cos(Dec)*dRA/dt, and are per year rather than per century. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalog are not + taken into account. + + 4) See also eraH2fk5, eraFk5hz, eraHfk5z. 
+ + Called: + eraStarpv star catalog data to space motion pv-vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRxp product of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPpp p-vector plus p-vector + eraPvstar space motion pv-vector to star catalog data + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r5_in = numpy.array(r5, dtype=numpy.double, order="C", copy=False, subok=True) + d5_in = numpy.array(d5, dtype=numpy.double, order="C", copy=False, subok=True) + dr5_in = numpy.array(dr5, dtype=numpy.double, order="C", copy=False, subok=True) + dd5_in = numpy.array(dd5, dtype=numpy.double, order="C", copy=False, subok=True) + px5_in = numpy.array(px5, dtype=numpy.double, order="C", copy=False, subok=True) + rv5_in = numpy.array(rv5, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r5_in, d5_in, dr5_in, dd5_in, px5_in, rv5_in) + rh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + drh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ddh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pxh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rvh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5_in, d5_in, dr5_in, dd5_in, px5_in, rv5_in, rh_out, dh_out, drh_out, ddh_out, pxh_out, rvh_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk52h(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rh_out.shape) > 0 and rh_out.shape[0] == 1 + rh_out = rh_out.reshape(rh_out.shape[1:]) + assert len(dh_out.shape) > 0 and dh_out.shape[0] == 1 + dh_out = dh_out.reshape(dh_out.shape[1:]) + assert len(drh_out.shape) > 0 and drh_out.shape[0] == 1 + drh_out = drh_out.reshape(drh_out.shape[1:]) + assert len(ddh_out.shape) > 0 and ddh_out.shape[0] == 1 + ddh_out = ddh_out.reshape(ddh_out.shape[1:]) + assert len(pxh_out.shape) > 0 and pxh_out.shape[0] == 1 + pxh_out = pxh_out.reshape(pxh_out.shape[1:]) + assert len(rvh_out.shape) > 0 and rvh_out.shape[0] == 1 + rvh_out = rvh_out.reshape(rvh_out.shape[1:]) + + return rh_out, dh_out, drh_out, ddh_out, pxh_out, rvh_out + + +def fk5hip(): + """ + Wrapper for ERFA function ``eraFk5hip``. + + Parameters + ---------- + + Returns + ------- + r5h : double array + s5h : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F k 5 h i p + - - - - - - - - - - + + FK5 to Hipparcos rotation and spin. + + Returned: + r5h double[3][3] r-matrix: FK5 rotation wrt Hipparcos (Note 2) + s5h double[3] r-vector: FK5 spin wrt Hipparcos (Note 3) + + Notes: + + 1) This function models the FK5 to Hipparcos transformation as a + pure rotation and spin; zonal errors in the FK5 catalogue are + not taken into account. 
+ + 2) The r-matrix r5h operates in the sense: + + P_Hipparcos = r5h x P_FK5 + + where P_FK5 is a p-vector in the FK5 frame, and P_Hipparcos is + the equivalent Hipparcos p-vector. + + 3) The r-vector s5h represents the time derivative of the FK5 to + Hipparcos rotation. The units are radians per year (Julian, + TDB). + + Called: + eraRv2m r-vector to r-matrix + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ) + r5h_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + s5h_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5h_out[...,0,0], s5h_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*0 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk5hip(it) + + return r5h_out, s5h_out + + +def fk5hz(r5, d5, date1, date2): + """ + Wrapper for ERFA function ``eraFk5hz``. + + Parameters + ---------- + r5 : double array + d5 : double array + date1 : double array + date2 : double array + + Returns + ------- + rh : double array + dh : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F k 5 h z + - - - - - - - - - + + Transform an FK5 (J2000.0) star position into the system of the + Hipparcos catalogue, assuming zero Hipparcos proper motion. + + Given: + r5 double FK5 RA (radians), equinox J2000.0, at date + d5 double FK5 Dec (radians), equinox J2000.0, at date + date1,date2 double TDB date (Notes 1,2) + + Returned: + rh double Hipparcos RA (radians) + dh double Hipparcos Dec (radians) + + Notes: + + 1) This function converts a star position from the FK5 system to + the Hipparcos system, in such a way that the Hipparcos proper + motion is zero. Because such a star has, in general, a non-zero + proper motion in the FK5 system, the function requires the date + at which the position in the FK5 system was determined. + + 2) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalogue are not + taken into account. + + 4) The position returned by this function is in the Hipparcos + reference system but at date date1+date2. + + 5) See also eraFk52h, eraH2fk5, eraHfk5z. 
+ + Called: + eraS2c spherical coordinates to unit vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraSxp multiply p-vector by scalar + eraRv2m r-vector to r-matrix + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r5_in = numpy.array(r5, dtype=numpy.double, order="C", copy=False, subok=True) + d5_in = numpy.array(d5, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r5_in, d5_in, date1_in, date2_in) + rh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5_in, d5_in, date1_in, date2_in, rh_out, dh_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk5hz(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rh_out.shape) > 0 and rh_out.shape[0] == 1 + rh_out = rh_out.reshape(rh_out.shape[1:]) + assert len(dh_out.shape) > 0 and dh_out.shape[0] == 1 + dh_out = dh_out.reshape(dh_out.shape[1:]) + + return rh_out, dh_out + + +def h2fk5(rh, dh, drh, ddh, pxh, rvh): + """ + Wrapper for ERFA function ``eraH2fk5``. + + Parameters + ---------- + rh : double array + dh : double array + drh : double array + ddh : double array + pxh : double array + rvh : double array + + Returns + ------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + px5 : double array + rv5 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a H 2 f k 5 + - - - - - - - - - + + Transform Hipparcos star data into the FK5 (J2000.0) system. + + Given (all Hipparcos, epoch J2000.0): + rh double RA (radians) + dh double Dec (radians) + drh double proper motion in RA (dRA/dt, rad/Jyear) + ddh double proper motion in Dec (dDec/dt, rad/Jyear) + pxh double parallax (arcsec) + rvh double radial velocity (km/s, positive = receding) + + Returned (all FK5, equinox J2000.0, epoch J2000.0): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double proper motion in RA (dRA/dt, rad/Jyear) + dd5 double proper motion in Dec (dDec/dt, rad/Jyear) + px5 double parallax (arcsec) + rv5 double radial velocity (km/s, positive = receding) + + Notes: + + 1) This function transforms Hipparcos star positions and proper + motions into FK5 J2000.0. + + 2) The proper motions in RA are dRA/dt rather than + cos(Dec)*dRA/dt, and are per year rather than per century. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalog are not + taken into account. 
+ + 4) See also eraFk52h, eraFk5hz, eraHfk5z. + + Called: + eraStarpv star catalog data to space motion pv-vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRv2m r-vector to r-matrix + eraRxp product of r-matrix and p-vector + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPmp p-vector minus p-vector + eraPvstar space motion pv-vector to star catalog data + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + dh_in = numpy.array(dh, dtype=numpy.double, order="C", copy=False, subok=True) + drh_in = numpy.array(drh, dtype=numpy.double, order="C", copy=False, subok=True) + ddh_in = numpy.array(ddh, dtype=numpy.double, order="C", copy=False, subok=True) + pxh_in = numpy.array(pxh, dtype=numpy.double, order="C", copy=False, subok=True) + rvh_in = numpy.array(rvh, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rh_in, dh_in, drh_in, ddh_in, pxh_in, rvh_in) + r5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dr5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rh_in, dh_in, drh_in, ddh_in, pxh_in, rvh_in, r5_out, d5_out, dr5_out, dd5_out, px5_out, rv5_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._h2fk5(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r5_out.shape) > 0 and r5_out.shape[0] == 1 + r5_out = r5_out.reshape(r5_out.shape[1:]) + assert len(d5_out.shape) > 0 and d5_out.shape[0] == 1 + d5_out = d5_out.reshape(d5_out.shape[1:]) + assert len(dr5_out.shape) > 0 and dr5_out.shape[0] == 1 + dr5_out = dr5_out.reshape(dr5_out.shape[1:]) + assert len(dd5_out.shape) > 0 and dd5_out.shape[0] == 1 + dd5_out = dd5_out.reshape(dd5_out.shape[1:]) + assert len(px5_out.shape) > 0 and px5_out.shape[0] == 1 + px5_out = px5_out.reshape(px5_out.shape[1:]) + assert len(rv5_out.shape) > 0 and rv5_out.shape[0] == 1 + rv5_out = rv5_out.reshape(rv5_out.shape[1:]) + + return r5_out, d5_out, dr5_out, dd5_out, px5_out, rv5_out + + +def hfk5z(rh, dh, date1, date2): + """ + Wrapper for ERFA function ``eraHfk5z``. + + Parameters + ---------- + rh : double array + dh : double array + date1 : double array + date2 : double array + + Returns + ------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a H f k 5 z + - - - - - - - - - + + Transform a Hipparcos star position into FK5 J2000.0, assuming + zero Hipparcos proper motion. 
+ + Given: + rh double Hipparcos RA (radians) + dh double Hipparcos Dec (radians) + date1,date2 double TDB date (Note 1) + + Returned (all FK5, equinox J2000.0, date date1+date2): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double FK5 RA proper motion (rad/year, Note 4) + dd5 double Dec proper motion (rad/year, Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The FK5 to Hipparcos transformation is modeled as a pure rotation + and spin; zonal errors in the FK5 catalogue are not taken into + account. + + 4) It was the intention that Hipparcos should be a close + approximation to an inertial frame, so that distant objects have + zero proper motion; such objects have (in general) non-zero + proper motion in FK5, and this function returns those fictitious + proper motions. + + 5) The position returned by this function is in the FK5 J2000.0 + reference system but at date date1+date2. + + 6) See also eraFk52h, eraH2fk5, eraFk5hz. + + Called: + eraS2c spherical coordinates to unit vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRxp product of r-matrix and p-vector + eraSxp multiply p-vector by scalar + eraRxr product of two r-matrices + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPv2s pv-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
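+
+ Illustrative usage (arbitrary position and dates; as with the other
+ wrappers in this module, scalar and array arguments are broadcast
+ against each other):
+
+ >>> import numpy as np
+ >>> r5, d5, dr5, dd5 = hfk5z(1.76, -0.29, 2400000.5, 54321.0)
+ >>> mjd = np.array([54321.0, 54322.0, 54323.0])
+ >>> r5, d5, dr5, dd5 = hfk5z(1.76, -0.29, 2400000.5, mjd)
+ >>> r5.shape
+ (3,)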
+ + """ + + #Turn all inputs into arrays + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + dh_in = numpy.array(dh, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rh_in, dh_in, date1_in, date2_in) + r5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dr5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rh_in, dh_in, date1_in, date2_in, r5_out, d5_out, dr5_out, dd5_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._hfk5z(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r5_out.shape) > 0 and r5_out.shape[0] == 1 + r5_out = r5_out.reshape(r5_out.shape[1:]) + assert len(d5_out.shape) > 0 and d5_out.shape[0] == 1 + d5_out = d5_out.reshape(d5_out.shape[1:]) + assert len(dr5_out.shape) > 0 and dr5_out.shape[0] == 1 + dr5_out = dr5_out.reshape(dr5_out.shape[1:]) + assert len(dd5_out.shape) > 0 and dd5_out.shape[0] == 1 + dd5_out = dd5_out.reshape(dd5_out.shape[1:]) + + return r5_out, d5_out, dr5_out, dd5_out + + +def starpm(ra1, dec1, pmr1, pmd1, px1, rv1, ep1a, ep1b, ep2a, ep2b): + """ + Wrapper for ERFA function ``eraStarpm``. + + Parameters + ---------- + ra1 : double array + dec1 : double array + pmr1 : double array + pmd1 : double array + px1 : double array + rv1 : double array + ep1a : double array + ep1b : double array + ep2a : double array + ep2b : double array + + Returns + ------- + ra2 : double array + dec2 : double array + pmr2 : double array + pmd2 : double array + px2 : double array + rv2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a S t a r p m + - - - - - - - - - - + + Star proper motion: update star catalog data for space motion. 
+ + Given: + ra1 double right ascension (radians), before + dec1 double declination (radians), before + pmr1 double RA proper motion (radians/year), before + pmd1 double Dec proper motion (radians/year), before + px1 double parallax (arcseconds), before + rv1 double radial velocity (km/s, +ve = receding), before + ep1a double "before" epoch, part A (Note 1) + ep1b double "before" epoch, part B (Note 1) + ep2a double "after" epoch, part A (Note 1) + ep2b double "after" epoch, part B (Note 1) + + Returned: + ra2 double right ascension (radians), after + dec2 double declination (radians), after + pmr2 double RA proper motion (radians/year), after + pmd2 double Dec proper motion (radians/year), after + px2 double parallax (arcseconds), after + rv2 double radial velocity (km/s, +ve = receding), after + + Returned (function value): + int status: + -1 = system error (should not occur) + 0 = no warnings or errors + 1 = distance overridden (Note 6) + 2 = excessive velocity (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above warnings + + Notes: + + 1) The starting and ending TDB dates ep1a+ep1b and ep2a+ep2b are + Julian Dates, apportioned in any convenient way between the two + parts (A and B). For example, JD(TDB)=2450123.7 could be + expressed in any of these ways, among others: + + epna epnb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per TDB + Julian year. + + The parallax and radial velocity are in the same frame. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, + is assumed. + + 6) An extremely small (or zero or negative) parallax is interpreted + to mean that the object is on the "celestial sphere", the radius + of which is an arbitrary (large) value (see the eraStarpv + function for the value used). When the distance is overridden in + this way, the status, initially zero, has 1 added to it. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX in the function eraStarpv), it is arbitrarily set + to zero. When this action occurs, 2 is added to the status. + + 8) The relativistic adjustment carried out in the eraStarpv function + involves an iterative calculation. If the process fails to + converge within a set number of iterations, 4 is added to the + status. 
+ + Called: + eraStarpv star catalog data to space motion pv-vector + eraPvu update a pv-vector + eraPdp scalar product of two p-vectors + eraPvstar space motion pv-vector to star catalog data + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ra1_in = numpy.array(ra1, dtype=numpy.double, order="C", copy=False, subok=True) + dec1_in = numpy.array(dec1, dtype=numpy.double, order="C", copy=False, subok=True) + pmr1_in = numpy.array(pmr1, dtype=numpy.double, order="C", copy=False, subok=True) + pmd1_in = numpy.array(pmd1, dtype=numpy.double, order="C", copy=False, subok=True) + px1_in = numpy.array(px1, dtype=numpy.double, order="C", copy=False, subok=True) + rv1_in = numpy.array(rv1, dtype=numpy.double, order="C", copy=False, subok=True) + ep1a_in = numpy.array(ep1a, dtype=numpy.double, order="C", copy=False, subok=True) + ep1b_in = numpy.array(ep1b, dtype=numpy.double, order="C", copy=False, subok=True) + ep2a_in = numpy.array(ep2a, dtype=numpy.double, order="C", copy=False, subok=True) + ep2b_in = numpy.array(ep2b, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in) + ra2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in, ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*10 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._starpm(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'starpm') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra2_out.shape) > 0 and ra2_out.shape[0] == 1 + ra2_out = ra2_out.reshape(ra2_out.shape[1:]) + assert len(dec2_out.shape) > 0 and dec2_out.shape[0] == 1 + dec2_out = dec2_out.reshape(dec2_out.shape[1:]) + assert len(pmr2_out.shape) > 0 and pmr2_out.shape[0] == 1 + pmr2_out = pmr2_out.reshape(pmr2_out.shape[1:]) + assert len(pmd2_out.shape) > 0 and pmd2_out.shape[0] == 1 + pmd2_out = pmd2_out.reshape(pmd2_out.shape[1:]) + assert len(px2_out.shape) > 0 and px2_out.shape[0] == 1 + px2_out = px2_out.reshape(px2_out.shape[1:]) + assert len(rv2_out.shape) > 0 and rv2_out.shape[0] == 1 + rv2_out = rv2_out.reshape(rv2_out.shape[1:]) + + return ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out +STATUS_CODES['starpm'] = {-1: 'system error (should not occur)', 0: 'no warnings or errors', 1: 'distance overridden (Note 6)', 2: 'excessive velocity (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 
'binary logical OR of the above warnings'} + + + +def eceq06(date1, date2, dl, db): + """ + Wrapper for ERFA function ``eraEceq06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E c e q 0 6 + - - - - - - - - - - + + Transformation from ecliptic coordinates (mean equinox and ecliptic + of date) to ICRS RA,Dec, using the IAU 2006 precession model. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + dl,db double ecliptic longitude and latitude (radians) + + Returned: + dr,dd double ICRS right ascension and declination (radians) + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 3) The transformation is approximately that from ecliptic longitude + and latitude (mean equinox and ecliptic of date) to mean J2000.0 + right ascension and declination, with only frame bias (always + less than 25 mas) to disturb this classical picture. + + Called: + eraS2c spherical coordinates to unit vector + eraEcm06 J2000.0 to ecliptic rotation matrix, IAU 2006 + eraTrxp product of transpose of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
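+
+ Illustrative usage (arbitrary date and ecliptic coordinates; eraEqec06,
+ wrapped as eqec06 later in this module, applies the transformation in
+ the opposite direction):
+
+ >>> dr, dd = eceq06(2450123.5, 0.2, 5.1, -0.4)
+ >>> dl, db = eqec06(2450123.5, 0.2, dr, dd)  # back to ecliptic, ~ (5.1, -0.4)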
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eceq06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def ecm06(date1, date2): + """ + Wrapper for ERFA function ``eraEcm06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E c m 0 6 + - - - - - - - - - + + ICRS equatorial to ecliptic rotation matrix, IAU 2006. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + + Returned: + rm double[3][3] ICRS to ecliptic rotation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 1) The matrix is in the sense + + E_ep = rm x P_ICRS, + + where P_ICRS is a vector with respect to ICRS right ascension + and declination axes and E_ep is the same vector with respect to + the (inertial) ecliptic and equinox of date. + + 2) P_ICRS is a free vector, merely a direction, typically of unit + magnitude, and not bound to any particular spatial origin, such + as the Earth, Sun or SSB. No assumptions are made about whether + it represents starlight and embodies astrometric effects such as + parallax or aberration. The transformation is approximately that + between mean J2000.0 right ascension and declination and ecliptic + longitude and latitude, with only frame bias (always less than + 25 mas) to disturb this classical picture. 
+ + Called: + eraObl06 mean obliquity, IAU 2006 + eraPmat06 PB matrix, IAU 2006 + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRxr product of two r-matrices + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rm_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rm_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ecm06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rm_out.shape) > 0 and rm_out.shape[0] == 1 + rm_out = rm_out.reshape(rm_out.shape[1:]) + + return rm_out + + +def eqec06(date1, date2, dr, dd): + """ + Wrapper for ERFA function ``eraEqec06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E q e c 0 6 + - - - - - - - - - - + + Transformation from ICRS equatorial coordinates to ecliptic + coordinates (mean equinox and ecliptic of date) using IAU 2006 + precession model. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + dr,dd double ICRS right ascension and declination (radians) + + Returned: + dl,db double ecliptic longitude and latitude (radians) + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 3) The transformation is approximately that from mean J2000.0 right + ascension and declination to ecliptic longitude and latitude + (mean equinox and ecliptic of date), with only frame bias (always + less than 25 mas) to disturb this classical picture. 
+ + Called: + eraS2c spherical coordinates to unit vector + eraEcm06 J2000.0 to ecliptic rotation matrix, IAU 2006 + eraRxp product of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eqec06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def lteceq(epj, dl, db): + """ + Wrapper for ERFA function ``eraLteceq``. + + Parameters + ---------- + epj : double array + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t e c e q + - - - - - - - - - - + + Transformation from ecliptic coordinates (mean equinox and ecliptic + of date) to ICRS RA,Dec, using a long-term precession model. + + Given: + epj double Julian epoch (TT) + dl,db double ecliptic longitude and latitude (radians) + + Returned: + dr,dd double ICRS right ascension and declination (radians) + + 1) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 2) The transformation is approximately that from ecliptic longitude + and latitude (mean equinox and ecliptic of date) to mean J2000.0 + right ascension and declination, with only frame bias (always + less than 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. 
+ + Called: + eraS2c spherical coordinates to unit vector + eraLtecm J2000.0 to ecliptic rotation matrix, long term + eraTrxp product of transpose of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in, dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._lteceq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def ltecm(epj): + """ + Wrapper for ERFA function ``eraLtecm``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a L t e c m + - - - - - - - - - + + ICRS equatorial to ecliptic rotation matrix, long-term. + + Given: + epj double Julian epoch (TT) + + Returned: + rm double[3][3] ICRS to ecliptic rotation matrix + + Notes: + + 1) The matrix is in the sense + + E_ep = rm x P_ICRS, + + where P_ICRS is a vector with respect to ICRS right ascension + and declination axes and E_ep is the same vector with respect to + the (inertial) ecliptic and equinox of epoch epj. + + 2) P_ICRS is a free vector, merely a direction, typically of unit + magnitude, and not bound to any particular spatial origin, such + as the Earth, Sun or SSB. No assumptions are made about whether + it represents starlight and embodies astrometric effects such as + parallax or aberration. The transformation is approximately that + between mean J2000.0 right ascension and declination and ecliptic + longitude and latitude, with only frame bias (always less than + 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. 
It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraLtpequ equator pole, long term + eraLtpecl ecliptic pole, long term + eraPxp vector product + eraPn normalize vector + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rm_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rm_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltecm(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rm_out.shape) > 0 and rm_out.shape[0] == 1 + rm_out = rm_out.reshape(rm_out.shape[1:]) + + return rm_out + + +def lteqec(epj, dr, dd): + """ + Wrapper for ERFA function ``eraLteqec``. + + Parameters + ---------- + epj : double array + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t e q e c + - - - - - - - - - - + + Transformation from ICRS equatorial coordinates to ecliptic + coordinates (mean equinox and ecliptic of date) using a long-term + precession model. + + Given: + epj double Julian epoch (TT) + dr,dd double ICRS right ascension and declination (radians) + + Returned: + dl,db double ecliptic longitude and latitude (radians) + + 1) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 2) The transformation is approximately that from mean J2000.0 right + ascension and declination to ecliptic longitude and latitude + (mean equinox and ecliptic of date), with only frame bias (always + less than 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraS2c spherical coordinates to unit vector + eraLtecm J2000.0 to ecliptic rotation matrix, long term + eraRxp product of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + References: + + Vondrak, J., Capitaine, N. 
and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in, dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._lteqec(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def g2icrs(dl, db): + """ + Wrapper for ERFA function ``eraG2icrs``. + + Parameters + ---------- + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G 2 i c r s + - - - - - - - - - - + + Transformation from Galactic Coordinates to ICRS. + + Given: + dl double galactic longitude (radians) + db double galactic latitude (radians) + + Returned: + dr double ICRS right ascension (radians) + dd double ICRS declination (radians) + + Notes: + + 1) The IAU 1958 system of Galactic coordinates was defined with + respect to the now obsolete reference system FK4 B1950.0. When + interpreting the system in a modern context, several factors have + to be taken into account: + + . The inclusion in FK4 positions of the E-terms of aberration. + + . The distortion of the FK4 proper motion system by differential + Galactic rotation. + + . The use of the B1950.0 equinox rather than the now-standard + J2000.0. + + . The frame bias between ICRS and the J2000.0 mean place system. + + The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation + matrix that transforms directly between ICRS and Galactic + coordinates with the above factors taken into account. The + matrix is derived from three angles, namely the ICRS coordinates + of the Galactic pole and the longitude of the ascending node of + the galactic equator on the ICRS equator. They are given in + degrees to five decimal places and for canonical purposes are + regarded as exact. In the Hipparcos Catalogue the matrix + elements are given to 10 decimal places (about 20 microarcsec). 
+ In the present ERFA function the matrix elements have been + recomputed from the canonical three angles and are given to 30 + decimal places. + + 2) The inverse transformation is performed by the function eraIcrs2g. + + Called: + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + eraS2c spherical coordinates to unit vector + eraTrxp product of transpose of r-matrix and p-vector + eraC2s p-vector to spherical + + Reference: + Perryman M.A.C. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho + catalogues. Astrometric and photometric star catalogues + derived from the ESA Hipparcos Space Astrometry Mission. ESA + Publications Division, Noordwijk, Netherlands. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._g2icrs(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def icrs2g(dr, dd): + """ + Wrapper for ERFA function ``eraIcrs2g``. + + Parameters + ---------- + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a I c r s 2 g + - - - - - - - - - - + + Transformation from ICRS to Galactic Coordinates. + + Given: + dr double ICRS right ascension (radians) + dd double ICRS declination (radians) + + Returned: + dl double galactic longitude (radians) + db double galactic latitude (radians) + + Notes: + + 1) The IAU 1958 system of Galactic coordinates was defined with + respect to the now obsolete reference system FK4 B1950.0. When + interpreting the system in a modern context, several factors have + to be taken into account: + + . The inclusion in FK4 positions of the E-terms of aberration. + + . The distortion of the FK4 proper motion system by differential + Galactic rotation. + + . The use of the B1950.0 equinox rather than the now-standard + J2000.0. + + . The frame bias between ICRS and the J2000.0 mean place system. + + The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation + matrix that transforms directly between ICRS and Galactic + coordinates with the above factors taken into account. 
The + matrix is derived from three angles, namely the ICRS coordinates + of the Galactic pole and the longitude of the ascending node of + the galactic equator on the ICRS equator. They are given in + degrees to five decimal places and for canonical purposes are + regarded as exact. In the Hipparcos Catalogue the matrix + elements are given to 10 decimal places (about 20 microarcsec). + In the present ERFA function the matrix elements have been + recomputed from the canonical three angles and are given to 30 + decimal places. + + 2) The inverse transformation is performed by the function eraG2icrs. + + Called: + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + eraS2c spherical coordinates to unit vector + eraRxp product of r-matrix and p-vector + eraC2s p-vector to spherical + + Reference: + Perryman M.A.C. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho + catalogues. Astrometric and photometric star catalogues + derived from the ESA Hipparcos Space Astrometry Mission. ESA + Publications Division, Noordwijk, Netherlands. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._icrs2g(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def eform(n): + """ + Wrapper for ERFA function ``eraEform``. + + Parameters + ---------- + n : int array + + Returns + ------- + a : double array + f : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E f o r m + - - - - - - - - - + + Earth reference ellipsoids. + + Given: + n int ellipsoid identifier (Note 1) + + Returned: + a double equatorial radius (meters, Note 2) + f double flattening (Note 2) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. + + 2) The ellipsoid parameters are returned in the form of equatorial + radius in meters (a) and flattening (f). The latter is a number + around 0.00335, i.e. around 1/298. 
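+
+    Illustrative call (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers behind these wrappers are
+    available:
+
+        a, f = eform(1)    # ERFA_WGS84
+        # a -> 6378137.0 (meters); f -> about 0.00335, i.e. roughly 1/298.257
+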
+ + 3) For the case where an unsupported n value is supplied, zero a and + f are returned, as well as error status. + + References: + + Department of Defense World Geodetic System 1984, National + Imagery and Mapping Agency Technical Report 8350.2, Third + Edition, p3-2. + + Moritz, H., Bull. Geodesique 66-2, 187 (1992). + + The Department of Defense World Geodetic System 1972, World + Geodetic System Committee, May 1974. + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + p220. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in) + a_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + f_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, a_out, f_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eform(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'eform') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(a_out.shape) > 0 and a_out.shape[0] == 1 + a_out = a_out.reshape(a_out.shape[1:]) + assert len(f_out.shape) > 0 and f_out.shape[0] == 1 + f_out = f_out.reshape(f_out.shape[1:]) + + return a_out, f_out +STATUS_CODES['eform'] = {0: 'OK', -1: 'illegal identifier (Note 3)'} + + + +def gc2gd(n, xyz): + """ + Wrapper for ERFA function ``eraGc2gd``. + + Parameters + ---------- + n : int array + xyz : double array + + Returns + ------- + elong : double array + phi : double array + height : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G c 2 g d + - - - - - - - - - + + Transform geocentric coordinates to geodetic using the specified + reference ellipsoid. + + Given: + n int ellipsoid identifier (Note 1) + xyz double[3] geocentric vector (Note 2) + + Returned: + elong double longitude (radians, east +ve, Note 3) + phi double latitude (geodetic, radians, Note 3) + height double height above ellipsoid (geodetic, Notes 2,3) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + -2 = internal error (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. + + 2) The geocentric vector (xyz, given) and height (height, returned) + are in meters. + + 3) An error status -1 means that the identifier n is illegal. An + error status -2 is theoretically impossible. In all error cases, + all three results are set to -1e9. + + 4) The inverse transformation is performed in the function eraGd2gc. 
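+
+    Illustrative call (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers are available; the geocentric
+    vector is in meters, per Note 2:
+
+        elong, phi, height = gc2gd(1, [6378137.0, 0.0, 0.0])
+        # a point on the WGS84 equator at the prime meridian:
+        # elong ~ 0, phi ~ 0, height ~ 0
+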
+ + Called: + eraEform Earth reference ellipsoids + eraGc2gde geocentric to geodetic transformation, general + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + xyz_in = numpy.array(xyz, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(xyz_in, (3,), "xyz") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, xyz_in[...,0]) + elong_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + height_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, xyz_in[...,0], elong_out, phi_out, height_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gc2gd(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gc2gd') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(elong_out.shape) > 0 and elong_out.shape[0] == 1 + elong_out = elong_out.reshape(elong_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(height_out.shape) > 0 and height_out.shape[0] == 1 + height_out = height_out.reshape(height_out.shape[1:]) + + return elong_out, phi_out, height_out +STATUS_CODES['gc2gd'] = {0: 'OK', -1: 'illegal identifier (Note 3)', -2: 'internal error (Note 3)'} + + + +def gc2gde(a, f, xyz): + """ + Wrapper for ERFA function ``eraGc2gde``. + + Parameters + ---------- + a : double array + f : double array + xyz : double array + + Returns + ------- + elong : double array + phi : double array + height : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G c 2 g d e + - - - - - - - - - - + + Transform geocentric coordinates to geodetic for a reference + ellipsoid of specified form. + + Given: + a double equatorial radius (Notes 2,4) + f double flattening (Note 3) + xyz double[3] geocentric vector (Note 4) + + Returned: + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians) + height double height above ellipsoid (geodetic, Note 4) + + Returned (function value): + int status: 0 = OK + -1 = illegal f + -2 = illegal a + + Notes: + + 1) This function is based on the GCONV2H Fortran subroutine by + Toshio Fukushima (see reference). + + 2) The equatorial radius, a, can be in any units, but meters is + the conventional choice. + + 3) The flattening, f, is (for the Earth) a value around 0.00335, + i.e. around 1/298. + + 4) The equatorial radius, a, and the geocentric vector, xyz, + must be given in the same units, and determine the units of + the returned height, height. + + 5) If an error occurs (status < 0), elong, phi and height are + unchanged. + + 6) The inverse transformation is performed in the function + eraGd2gce. 
+ + 7) The transformation for a standard ellipsoid (such as ERFA_WGS84) can + more conveniently be performed by calling eraGc2gd, which uses a + numerical code to identify the required A and F values. + + Reference: + + Fukushima, T., "Transformation from Cartesian to geodetic + coordinates accelerated by Halley's method", J.Geodesy (2006) + 79: 689-693 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + f_in = numpy.array(f, dtype=numpy.double, order="C", copy=False, subok=True) + xyz_in = numpy.array(xyz, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(xyz_in, (3,), "xyz") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in, f_in, xyz_in[...,0]) + elong_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + height_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, f_in, xyz_in[...,0], elong_out, phi_out, height_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gc2gde(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gc2gde') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(elong_out.shape) > 0 and elong_out.shape[0] == 1 + elong_out = elong_out.reshape(elong_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(height_out.shape) > 0 and height_out.shape[0] == 1 + height_out = height_out.reshape(height_out.shape[1:]) + + return elong_out, phi_out, height_out +STATUS_CODES['gc2gde'] = {0: 'OK', -1: 'illegal f', -2: 'illegal a'} + + + +def gd2gc(n, elong, phi, height): + """ + Wrapper for ERFA function ``eraGd2gc``. + + Parameters + ---------- + n : int array + elong : double array + phi : double array + height : double array + + Returns + ------- + xyz : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G d 2 g c + - - - - - - - - - + + Transform geodetic coordinates to geocentric using the specified + reference ellipsoid. + + Given: + n int ellipsoid identifier (Note 1) + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians, Note 3) + height double height above ellipsoid (geodetic, Notes 2,3) + + Returned: + xyz double[3] geocentric vector (Note 2) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + -2 = illegal case (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. 
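+
+    Illustrative call (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers are available; input angles are
+    in radians and the height in meters:
+
+        xyz = gd2gc(1, 0.0, 0.0, 0.0)
+        # sea-level point at longitude 0, latitude 0 on the WGS84 ellipsoid:
+        # xyz ~ [6378137.0, 0.0, 0.0]
+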
+ + 2) The height (height, given) and the geocentric vector (xyz, + returned) are in meters. + + 3) No validation is performed on the arguments elong, phi and + height. An error status -1 means that the identifier n is + illegal. An error status -2 protects against cases that would + lead to arithmetic exceptions. In all error cases, xyz is set + to zeros. + + 4) The inverse transformation is performed in the function eraGc2gd. + + Called: + eraEform Earth reference ellipsoids + eraGd2gce geodetic to geocentric transformation, general + eraZp zero p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + height_in = numpy.array(height, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, elong_in, phi_in, height_in) + xyz_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, elong_in, phi_in, height_in, xyz_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gd2gc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gd2gc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(xyz_out.shape) > 0 and xyz_out.shape[0] == 1 + xyz_out = xyz_out.reshape(xyz_out.shape[1:]) + + return xyz_out +STATUS_CODES['gd2gc'] = {0: 'OK', -1: 'illegal identifier (Note 3)', -2: 'illegal case (Note 3)'} + + + +def gd2gce(a, f, elong, phi, height): + """ + Wrapper for ERFA function ``eraGd2gce``. + + Parameters + ---------- + a : double array + f : double array + elong : double array + phi : double array + height : double array + + Returns + ------- + xyz : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G d 2 g c e + - - - - - - - - - - + + Transform geodetic coordinates to geocentric for a reference + ellipsoid of specified form. + + Given: + a double equatorial radius (Notes 1,4) + f double flattening (Notes 2,4) + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians, Note 4) + height double height above ellipsoid (geodetic, Notes 3,4) + + Returned: + xyz double[3] geocentric vector (Note 3) + + Returned (function value): + int status: 0 = OK + -1 = illegal case (Note 4) + Notes: + + 1) The equatorial radius, a, can be in any units, but meters is + the conventional choice. + + 2) The flattening, f, is (for the Earth) a value around 0.00335, + i.e. around 1/298. + + 3) The equatorial radius, a, and the height, height, must be + given in the same units, and determine the units of the + returned geocentric vector, xyz. + + 4) No validation is performed on individual arguments. 
The error + status -1 protects against (unrealistic) cases that would lead + to arithmetic exceptions. If an error occurs, xyz is unchanged. + + 5) The inverse transformation is performed in the function + eraGc2gde. + + 6) The transformation for a standard ellipsoid (such as ERFA_WGS84) can + more conveniently be performed by calling eraGd2gc, which uses a + numerical code to identify the required a and f values. + + References: + + Green, R.M., Spherical Astronomy, Cambridge University Press, + (1985) Section 4.5, p96. + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 4.22, p202. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + f_in = numpy.array(f, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + height_in = numpy.array(height, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in, f_in, elong_in, phi_in, height_in) + xyz_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, f_in, elong_in, phi_in, height_in, xyz_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gd2gce(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gd2gce') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(xyz_out.shape) > 0 and xyz_out.shape[0] == 1 + xyz_out = xyz_out.reshape(xyz_out.shape[1:]) + + return xyz_out +STATUS_CODES['gd2gce'] = {0: 'OK', -1: 'illegal case (Note 4)Notes:'} + + + +def d2dtf(scale, ndp, d1, d2): + """ + Wrapper for ERFA function ``eraD2dtf``. + + Parameters + ---------- + scale : const char array + ndp : int array + d1 : double array + d2 : double array + + Returns + ------- + iy : int array + im : int array + id : int array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a D 2 d t f + - - - - - - - - - + + Format for output a 2-part Julian Date (or in the case of UTC a + quasi-JD form that includes special provision for leap seconds). + + Given: + scale char[] time scale ID (Note 1) + ndp int resolution (Note 2) + d1,d2 double time as a 2-part Julian Date (Notes 3,4) + + Returned: + iy,im,id int year, month, day in Gregorian calendar (Note 5) + ihmsf int[4] hours, minutes, seconds, fraction (Note 1) + + Returned (function value): + int status: +1 = dubious year (Note 5) + 0 = OK + -1 = unacceptable date (Note 6) + + Notes: + + 1) scale identifies the time scale. Only the value "UTC" (in upper + case) is significant, and enables handling of leap seconds (see + Note 4). 
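+
+    Illustrative call (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers are available; the quasi-JD used
+    here is 2400000.5 + 57754.0, i.e. 2017 January 1.0 UTC:
+
+        iy, im, id, ihmsf = d2dtf("UTC", 3, 2400000.5, 57754.0)
+        # iy, im, id -> 2017, 1, 1 and ihmsf -> [0, 0, 0, 0]
+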
+ + 2) ndp is the number of decimal places in the seconds field, and can + have negative as well as positive values, such as: + + ndp resolution + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + + The limits are platform dependent, but a safe range is -5 to +9. + + 3) d1+d2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where d1 is the Julian Day Number + and d2 is the fraction of a day. In the case of UTC, where the + use of JD is problematical, special conventions apply: see the + next note. + + 4) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The ERFA internal convention is that + the quasi-JD day represents UTC days whether the length is 86399, + 86400 or 86401 SI seconds. In the 1960-1972 era there were + smaller jumps (in either direction) each time the linear UTC(TAI) + expression was changed, and these "mini-leaps" are also included + in the ERFA convention. + + 5) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 6) For calendar conventions and limitations, see eraCal2jd. + + Called: + eraJd2cal JD to Gregorian calendar + eraD2tf decompose days to hms + eraDat delta(AT) = TAI-UTC + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + scale_in = numpy.array(scale, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + d1_in = numpy.array(d1, dtype=numpy.double, order="C", copy=False, subok=True) + d2_in = numpy.array(d2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), scale_in, ndp_in, d1_in, d2_in) + iy_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + im_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + id_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [scale_in, ndp_in, d1_in, d2_in, iy_out, im_out, id_out, ihmsf_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._d2dtf(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'd2dtf') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iy_out.shape) > 0 and iy_out.shape[0] == 1 + iy_out = iy_out.reshape(iy_out.shape[1:]) + assert len(im_out.shape) > 0 and im_out.shape[0] == 1 + im_out = im_out.reshape(im_out.shape[1:]) + assert len(id_out.shape) > 0 and id_out.shape[0] == 1 + id_out = id_out.reshape(id_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return iy_out, im_out, id_out, ihmsf_out +STATUS_CODES['d2dtf'] = {1: 'dubious year 
(Note 5)', 0: 'OK', -1: 'unacceptable date (Note 6)'} + + + +def dat(iy, im, id, fd): + """ + Wrapper for ERFA function ``eraDat``. + + Parameters + ---------- + iy : int array + im : int array + id : int array + fd : double array + + Returns + ------- + deltat : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a D a t + - - - - - - - + + For a given UTC date, calculate delta(AT) = TAI-UTC. + + :------------------------------------------: + : : + : IMPORTANT : + : : + : A new version of this function must be : + : produced whenever a new leap second is : + : announced. There are four items to : + : change on each such occasion: : + : : + : 1) A new line must be added to the set : + : of statements that initialize the : + : array "changes". : + : : + : 2) The constant IYV must be set to the : + : current year. : + : : + : 3) The "Latest leap second" comment : + : below must be set to the new leap : + : second date. : + : : + : 4) The "This revision" comment, later, : + : must be set to the current date. : + : : + : Change (2) must also be carried out : + : whenever the function is re-issued, : + : even if no leap seconds have been : + : added. : + : : + : Latest leap second: 2016 December 31 : + : : + :__________________________________________: + + Given: + iy int UTC: year (Notes 1 and 2) + im int month (Note 2) + id int day (Notes 2 and 3) + fd double fraction of day (Note 4) + + Returned: + deltat double TAI minus UTC, seconds + + Returned (function value): + int status (Note 5): + 1 = dubious year (Note 1) + 0 = OK + -1 = bad year + -2 = bad month + -3 = bad day (Note 3) + -4 = bad fraction (Note 4) + -5 = internal error (Note 5) + + Notes: + + 1) UTC began at 1960 January 1.0 (JD 2436934.5) and it is improper + to call the function with an earlier date. If this is attempted, + zero is returned together with a warning status. + + Because leap seconds cannot, in principle, be predicted in + advance, a reliable check for dates beyond the valid range is + impossible. To guard against gross errors, a year five or more + after the release year of the present function (see the constant + IYV) is considered dubious. In this case a warning status is + returned but the result is computed in the normal way. + + For both too-early and too-late years, the warning status is +1. + This is distinct from the error status -1, which signifies a year + so early that JD could not be computed. + + 2) If the specified date is for a day which ends with a leap second, + the TAI-UTC value returned is for the period leading up to the + leap second. If the date is for a day which begins as a leap + second ends, the TAI-UTC returned is for the period following the + leap second. + + 3) The day number must be in the normal calendar range, for example + 1 through 30 for April. The "almanac" convention of allowing + such dates as January 0 and December 32 is not supported in this + function, in order to avoid confusion near leap seconds. + + 4) The fraction of day is used only for dates before the + introduction of leap seconds, the first of which occurred at the + end of 1971. It is tested for validity (0 to 1 is the valid + range) even if not used; if invalid, zero is used and status -4 + is returned. For many applications, setting fd to zero is + acceptable; the resulting error is always less than 3 ms (and + occurs only pre-1972). + + 5) The status value returned in the case where there are multiple + errors refers to the first error detected. 
For example, if the + month and day are 13 and 32 respectively, status -2 (bad month) + will be returned. The "internal error" status refers to a + case that is impossible but causes some compilers to issue a + warning. + + 6) In cases where a valid result is not available, zero is returned. + + References: + + 1) For dates from 1961 January 1 onwards, the expressions from the + file ftp://maia.usno.navy.mil/ser7/tai-utc.dat are used. + + 2) The 5ms timestep at 1961 January 1 is taken from 2.58.1 (p87) of + the 1992 Explanatory Supplement. + + Called: + eraCal2jd Gregorian calendar to JD + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + fd_in = numpy.array(fd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), iy_in, im_in, id_in, fd_in) + deltat_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [iy_in, im_in, id_in, fd_in, deltat_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dat(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'dat') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(deltat_out.shape) > 0 and deltat_out.shape[0] == 1 + deltat_out = deltat_out.reshape(deltat_out.shape[1:]) + + return deltat_out +STATUS_CODES['dat'] = {1: 'dubious year (Note 1)', 0: 'OK', -1: 'bad year', -2: 'bad month', -3: 'bad day (Note 3)', -4: 'bad fraction (Note 4)', -5: 'internal error (Note 5)'} + + + +def dtdb(date1, date2, ut, elong, u, v): + """ + Wrapper for ERFA function ``eraDtdb``. + + Parameters + ---------- + date1 : double array + date2 : double array + ut : double array + elong : double array + u : double array + v : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a D t d b + - - - - - - - - + + An approximation to TDB-TT, the difference between barycentric + dynamical time and terrestrial time, for an observer on the Earth. 
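+
+    (Added example for the ``dat`` wrapper defined above; not part of the
+    ERFA documentation. It assumes the compiled ``_core`` helpers are
+    available.)
+
+        deltat = dat(2017, 1, 1, 0.0)
+        # deltat -> 37.0 seconds: TAI-UTC after the leap second of
+        # 2016 December 31 noted in the eraDat block above
+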
+ + The different time scales - proper, coordinate and realized - are + related to each other: + + TAI <- physically realized + : + offset <- observed (nominally +32.184s) + : + TT <- terrestrial time + : + rate adjustment (L_G) <- definition of TT + : + TCG <- time scale for GCRS + : + "periodic" terms <- eraDtdb is an implementation + : + rate adjustment (L_C) <- function of solar-system ephemeris + : + TCB <- time scale for BCRS + : + rate adjustment (-L_B) <- definition of TDB + : + TDB <- TCB scaled to track TT + : + "periodic" terms <- -eraDtdb is an approximation + : + TT <- terrestrial time + + Adopted values for the various constants can be found in the IERS + Conventions (McCarthy & Petit 2003). + + Given: + date1,date2 double date, TDB (Notes 1-3) + ut double universal time (UT1, fraction of one day) + elong double longitude (east positive, radians) + u double distance from Earth spin axis (km) + v double distance north of equatorial plane (km) + + Returned (function value): + double TDB-TT (seconds) + + Notes: + + 1) The date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + Although the date is, formally, barycentric dynamical time (TDB), + the terrestrial dynamical time (TT) can be used with no practical + effect on the accuracy of the prediction. + + 2) TT can be regarded as a coordinate time that is realized as an + offset of 32.184s from International Atomic Time, TAI. TT is a + specific linear transformation of geocentric coordinate time TCG, + which is the time scale for the Geocentric Celestial Reference + System, GCRS. + + 3) TDB is a coordinate time, and is a specific linear transformation + of barycentric coordinate time TCB, which is the time scale for + the Barycentric Celestial Reference System, BCRS. + + 4) The difference TCG-TCB depends on the masses and positions of the + bodies of the solar system and the velocity of the Earth. It is + dominated by a rate difference, the residual being of a periodic + character. The latter, which is modeled by the present function, + comprises a main (annual) sinusoidal term of amplitude + approximately 0.00166 seconds, plus planetary terms up to about + 20 microseconds, and lunar and diurnal terms up to 2 microseconds. + These effects come from the changing transverse Doppler effect + and gravitational red-shift as the observer (on the Earth's + surface) experiences variations in speed (with respect to the + BCRS) and gravitational potential. + + 5) TDB can be regarded as the same as TCB but with a rate adjustment + to keep it close to TT, which is convenient for many applications. + The history of successive attempts to define TDB is set out in + Resolution 3 adopted by the IAU General Assembly in 2006, which + defines a fixed TDB(TCB) transformation that is consistent with + contemporary solar-system ephemerides. 
Future ephemerides will + imply slightly changed transformations between TCG and TCB, which + could introduce a linear drift between TDB and TT; however, any + such drift is unlikely to exceed 1 nanosecond per century. + + 6) The geocentric TDB-TT model used in the present function is that of + Fairhead & Bretagnon (1990), in its full form. It was originally + supplied by Fairhead (private communications with P.T.Wallace, + 1990) as a Fortran subroutine. The present C function contains an + adaptation of the Fairhead code. The numerical results are + essentially unaffected by the changes, the differences with + respect to the Fairhead & Bretagnon original being at the 1e-20 s + level. + + The topocentric part of the model is from Moyer (1981) and + Murray (1983), with fundamental arguments adapted from + Simon et al. 1994. It is an approximation to the expression + ( v / c ) . ( r / c ), where v is the barycentric velocity of + the Earth, r is the geocentric position of the observer and + c is the speed of light. + + By supplying zeroes for u and v, the topocentric part of the + model can be nullified, and the function will return the Fairhead + & Bretagnon result alone. + + 7) During the interval 1950-2050, the absolute accuracy is better + than +/- 3 nanoseconds relative to time ephemerides obtained by + direct numerical integrations based on the JPL DE405 solar system + ephemeris. + + 8) It must be stressed that the present function is merely a model, + and that numerical integration of solar-system ephemerides is the + definitive method for predicting the relationship between TCG and + TCB and hence between TT and TDB. + + References: + + Fairhead, L., & Bretagnon, P., Astron.Astrophys., 229, 240-247 + (1990). + + IAU 2006 Resolution 3. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Moyer, T.D., Cel.Mech., 23, 33 (1981). + + Murray, C.A., Vectorial Astrometry, Adam Hilger (1983). + + Seidelmann, P.K. et al., Explanatory Supplement to the + Astronomical Almanac, Chapter 2, University Science Books (1992). + + Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G. & Laskar, J., Astron.Astrophys., 282, 663-683 (1994). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
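+
+    Illustrative call (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers are available. Zero u and v
+    suppress the topocentric part (Note 6), leaving the Fairhead &
+    Bretagnon geocentric series:
+
+        dt = dtdb(2450123.7, 0.0, 0.0, 0.0, 0.0, 0.0)
+        # dt is TDB-TT in seconds; the dominant annual term has an
+        # amplitude of about 0.00166 s (Note 4)
+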
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ut_in = numpy.array(ut, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + u_in = numpy.array(u, dtype=numpy.double, order="C", copy=False, subok=True) + v_in = numpy.array(v, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ut_in, elong_in, u_in, v_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ut_in, elong_in, u_in, v_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dtdb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def dtf2d(scale, iy, im, id, ihr, imn, sec): + """ + Wrapper for ERFA function ``eraDtf2d``. + + Parameters + ---------- + scale : const char array + iy : int array + im : int array + id : int array + ihr : int array + imn : int array + sec : double array + + Returns + ------- + d1 : double array + d2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a D t f 2 d + - - - - - - - - - + + Encode date and time fields into 2-part Julian Date (or in the case + of UTC a quasi-JD form that includes special provision for leap + seconds). + + Given: + scale char[] time scale ID (Note 1) + iy,im,id int year, month, day in Gregorian calendar (Note 2) + ihr,imn int hour, minute + sec double seconds + + Returned: + d1,d2 double 2-part Julian Date (Notes 3,4) + + Returned (function value): + int status: +3 = both of next two + +2 = time is after end of day (Note 5) + +1 = dubious year (Note 6) + 0 = OK + -1 = bad year + -2 = bad month + -3 = bad day + -4 = bad hour + -5 = bad minute + -6 = bad second (<0) + + Notes: + + 1) scale identifies the time scale. Only the value "UTC" (in upper + case) is significant, and enables handling of leap seconds (see + Note 4). + + 2) For calendar conventions and limitations, see eraCal2jd. + + 3) The sum of the results, d1+d2, is Julian Date, where normally d1 + is the Julian Day Number and d2 is the fraction of a day. In the + case of UTC, where the use of JD is problematical, special + conventions apply: see the next note. + + 4) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The ERFA internal convention is that + the quasi-JD day represents UTC days whether the length is 86399, + 86400 or 86401 SI seconds. In the 1960-1972 era there were + smaller jumps (in either direction) each time the linear UTC(TAI) + expression was changed, and these "mini-leaps" are also included + in the ERFA convention. 
+ + 5) The warning status "time is after end of day" usually means that + the sec argument is greater than 60.0. However, in a day ending + in a leap second the limit changes to 61.0 (or 59.0 in the case + of a negative leap second). + + 6) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 7) Only in the case of continuous and regular time scales (TAI, TT, + TCG, TCB and TDB) is the result d1+d2 a Julian Date, strictly + speaking. In the other cases (UT1 and UTC) the result must be + used with circumspection; in particular the difference between + two such results cannot be interpreted as a precise time + interval. + + Called: + eraCal2jd Gregorian calendar to JD + eraDat delta(AT) = TAI-UTC + eraJd2cal JD to Gregorian calendar + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + scale_in = numpy.array(scale, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + ihr_in = numpy.array(ihr, dtype=numpy.intc, order="C", copy=False, subok=True) + imn_in = numpy.array(imn, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), scale_in, iy_in, im_in, id_in, ihr_in, imn_in, sec_in) + d1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [scale_in, iy_in, im_in, id_in, ihr_in, imn_in, sec_in, d1_out, d2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dtf2d(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'dtf2d') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(d1_out.shape) > 0 and d1_out.shape[0] == 1 + d1_out = d1_out.reshape(d1_out.shape[1:]) + assert len(d2_out.shape) > 0 and d2_out.shape[0] == 1 + d2_out = d2_out.reshape(d2_out.shape[1:]) + + return d1_out, d2_out +STATUS_CODES['dtf2d'] = {3: 'both of next two', 2: 'time is after end of day (Note 5)', 1: 'dubious year (Note 6)', 0: 'OK', -1: 'bad year', -2: 'bad month', -3: 'bad day', -4: 'bad hour', -5: 'bad minute', -6: 'bad second (<0)'} + + + +def taitt(tai1, tai2): + """ + Wrapper for ERFA function ``eraTaitt``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T a i t t + - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Terrestrial Time, TT. 
+ + Given: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned + tt1,tt2 follow suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taitt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taitt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['taitt'] = {0: 'OK'} + + + +def taiut1(tai1, tai2, dta): + """ + Wrapper for ERFA function ``eraTaiut1``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + dta : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T a i u t 1 + - - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Universal Time, UT1. + + Given: + tai1,tai2 double TAI as a 2-part Julian Date + dta double UT1-TAI in seconds + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned + UT11,UT12 follow suit. + + 2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is + available from IERS tabulations. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
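+
+    Illustrative calls (added here; not part of the ERFA documentation),
+    assuming the compiled ``_core`` helpers are available; the dta value
+    below is purely illustrative of an IERS-tabulated UT1-TAI (Note 2):
+
+        tt1, tt2 = taitt(2453750.5, 0.892482639)              # TT = TAI + 32.184 s
+        ut11, ut12 = taiut1(2453750.5, 0.892482639, -32.6659)
+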
+ + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + dta_in = numpy.array(dta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in, dta_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, dta_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taiut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taiut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['taiut1'] = {0: 'OK'} + + + +def taiutc(tai1, tai2): + """ + Wrapper for ERFA function ``eraTaiutc``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + + Returns + ------- + utc1 : double array + utc2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T a i u t c + - - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Coordinated Universal Time, UTC. + + Given: + tai1,tai2 double TAI as a 2-part Julian Date (Note 1) + + Returned: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-3) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned utc1 + and utc2 form an analogous pair, except that a special convention + is used, to deal with the problem of leap seconds - see the next + note. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. In the 1960-1972 era + there were smaller jumps (in either direction) each time the + linear UTC(TAI) expression was changed, and these "mini-leaps" + are also included in the ERFA convention. + + 3) The function eraD2dtf can be used to transform the UTC quasi-JD + into calendar date and clock time, including UTC leap second + handling. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + Called: + eraUtctai UTC to TAI + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 
32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in) + utc1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + utc2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, utc1_out, utc2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taiutc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taiutc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(utc1_out.shape) > 0 and utc1_out.shape[0] == 1 + utc1_out = utc1_out.reshape(utc1_out.shape[1:]) + assert len(utc2_out.shape) > 0 and utc2_out.shape[0] == 1 + utc2_out = utc2_out.reshape(utc2_out.shape[1:]) + + return utc1_out, utc2_out +STATUS_CODES['taiutc'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def tcbtdb(tcb1, tcb2): + """ + Wrapper for ERFA function ``eraTcbtdb``. + + Parameters + ---------- + tcb1 : double array + tcb2 : double array + + Returns + ------- + tdb1 : double array + tdb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T c b t d b + - - - - - - - - - - + + Time scale transformation: Barycentric Coordinate Time, TCB, to + Barycentric Dynamical Time, TDB. + + Given: + tcb1,tcb2 double TCB as a 2-part Julian Date + + Returned: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tcb1+tcb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tcb1 is the Julian + Day Number and tcb2 is the fraction of a day. The returned + tdb1,tdb2 follow suit. + + 2) The 2006 IAU General Assembly introduced a conventional linear + transformation between TDB and TCB. This transformation + compensates for the drift between TCB and terrestrial time TT, + and keeps TDB approximately centered on TT. Because the + relationship between TT and TCB depends on the adopted solar + system ephemeris, the degree of alignment between TDB and TT over + long intervals will vary according to which ephemeris is used. + Former definitions of TDB attempted to avoid this problem by + stipulating that TDB and TT should differ only by periodic + effects. This is a good description of the nature of the + relationship but eluded precise mathematical formulation. The + conventional linear relationship adopted in 2006 sidestepped + these difficulties whilst delivering a TDB that in practice was + consistent with values before that date. 
+ + 3) TDB is essentially the same as Teph, the time argument for the + JPL solar system ephemerides. + + Reference: + + IAU 2006 Resolution B3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tcb1_in = numpy.array(tcb1, dtype=numpy.double, order="C", copy=False, subok=True) + tcb2_in = numpy.array(tcb2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tcb1_in, tcb2_in) + tdb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tdb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tcb1_in, tcb2_in, tdb1_out, tdb2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tcbtdb(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tcbtdb') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tdb1_out.shape) > 0 and tdb1_out.shape[0] == 1 + tdb1_out = tdb1_out.reshape(tdb1_out.shape[1:]) + assert len(tdb2_out.shape) > 0 and tdb2_out.shape[0] == 1 + tdb2_out = tdb2_out.reshape(tdb2_out.shape[1:]) + + return tdb1_out, tdb2_out +STATUS_CODES['tcbtdb'] = {0: 'OK'} + + + +def tcgtt(tcg1, tcg2): + """ + Wrapper for ERFA function ``eraTcgtt``. + + Parameters + ---------- + tcg1 : double array + tcg2 : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T c g t t + - - - - - - - - - + + Time scale transformation: Geocentric Coordinate Time, TCG, to + Terrestrial Time, TT. + + Given: + tcg1,tcg2 double TCG as a 2-part Julian Date + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tcg1+tcg2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tcg1 is the Julian + Day Number and tcg22 is the fraction of a day. The returned + tt1,tt2 follow suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),. + IERS Technical Note No. 32, BKG (2004) + + IAU 2000 Resolution B1.9 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
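Because every wrapper builds a ``numpy.broadcast`` over its inputs and walks them with ``nditer``, scalars and arrays can be mixed freely and the conversion is applied element-wise. A sketch using ``tcbtdb`` over several epochs (assumed import path; the epochs are illustrative):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    tcb1 = 2456000.5                          # shared day number (scalar)
    tcb2 = np.array([0.0, 0.25, 0.5, 0.75])   # four fractions of a day
    tdb1, tdb2 = erfa.tcbtdb(tcb1, tcb2)

    print(tdb2.shape)     # (4,) -- the scalar tcb1 broadcasts against tcb2
    print(tdb2 - tcb2)    # offsets of order 1e-4 day from the TDB-TCB relation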
+ + """ + + #Turn all inputs into arrays + tcg1_in = numpy.array(tcg1, dtype=numpy.double, order="C", copy=False, subok=True) + tcg2_in = numpy.array(tcg2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tcg1_in, tcg2_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tcg1_in, tcg2_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tcgtt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tcgtt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['tcgtt'] = {0: 'OK'} + + + +def tdbtcb(tdb1, tdb2): + """ + Wrapper for ERFA function ``eraTdbtcb``. + + Parameters + ---------- + tdb1 : double array + tdb2 : double array + + Returns + ------- + tcb1 : double array + tcb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T d b t c b + - - - - - - - - - - + + Time scale transformation: Barycentric Dynamical Time, TDB, to + Barycentric Coordinate Time, TCB. + + Given: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned: + tcb1,tcb2 double TCB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tdb1+tdb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tdb1 is the Julian + Day Number and tdb2 is the fraction of a day. The returned + tcb1,tcb2 follow suit. + + 2) The 2006 IAU General Assembly introduced a conventional linear + transformation between TDB and TCB. This transformation + compensates for the drift between TCB and terrestrial time TT, + and keeps TDB approximately centered on TT. Because the + relationship between TT and TCB depends on the adopted solar + system ephemeris, the degree of alignment between TDB and TT over + long intervals will vary according to which ephemeris is used. + Former definitions of TDB attempted to avoid this problem by + stipulating that TDB and TT should differ only by periodic + effects. This is a good description of the nature of the + relationship but eluded precise mathematical formulation. The + conventional linear relationship adopted in 2006 sidestepped + these difficulties whilst delivering a TDB that in practice was + consistent with values before that date. + + 3) TDB is essentially the same as Teph, the time argument for the + JPL solar system ephemerides. + + Reference: + + IAU 2006 Resolution B3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tdb1_in = numpy.array(tdb1, dtype=numpy.double, order="C", copy=False, subok=True) + tdb2_in = numpy.array(tdb2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tdb1_in, tdb2_in) + tcb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tcb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tdb1_in, tdb2_in, tcb1_out, tcb2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tdbtcb(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tdbtcb') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tcb1_out.shape) > 0 and tcb1_out.shape[0] == 1 + tcb1_out = tcb1_out.reshape(tcb1_out.shape[1:]) + assert len(tcb2_out.shape) > 0 and tcb2_out.shape[0] == 1 + tcb2_out = tcb2_out.reshape(tcb2_out.shape[1:]) + + return tcb1_out, tcb2_out +STATUS_CODES['tdbtcb'] = {0: 'OK'} + + + +def tdbtt(tdb1, tdb2, dtr): + """ + Wrapper for ERFA function ``eraTdbtt``. + + Parameters + ---------- + tdb1 : double array + tdb2 : double array + dtr : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T d b t t + - - - - - - - - - + + Time scale transformation: Barycentric Dynamical Time, TDB, to + Terrestrial Time, TT. + + Given: + tdb1,tdb2 double TDB as a 2-part Julian Date + dtr double TDB-TT in seconds + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tdb1+tdb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tdb1 is the Julian + Day Number and tdb2 is the fraction of a day. The returned + tt1,tt2 follow suit. + + 2) The argument dtr represents the quasi-periodic component of the + GR transformation between TT and TCB. It is dependent upon the + adopted solar-system ephemeris, and can be obtained by numerical + integration, by interrogating a precomputed time ephemeris or by + evaluating a model such as that implemented in the ERFA function + eraDtdb. The quantity is dominated by an annual term of 1.7 ms + amplitude. + + 3) TDB is essentially the same as Teph, the time argument for the + JPL solar system ephemerides. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2006 Resolution 3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
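Since ``tcbtdb`` and ``tdbtcb`` apply the same conventional linear relation in opposite directions, a round trip should reproduce the input to roughly the limit of double precision on the summed date. An illustrative check (assumed import path; deliberately loose tolerance):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    tdb1, tdb2 = 2456000.5, 0.3
    tcb1, tcb2 = erfa.tdbtcb(tdb1, tdb2)
    back1, back2 = erfa.tcbtdb(tcb1, tcb2)

    # agreement is limited mainly by summing the two JD parts in double precision
    assert abs(float(back1 + back2) - (tdb1 + tdb2)) < 1e-8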
+ + """ + + #Turn all inputs into arrays + tdb1_in = numpy.array(tdb1, dtype=numpy.double, order="C", copy=False, subok=True) + tdb2_in = numpy.array(tdb2, dtype=numpy.double, order="C", copy=False, subok=True) + dtr_in = numpy.array(dtr, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tdb1_in, tdb2_in, dtr_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tdb1_in, tdb2_in, dtr_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tdbtt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tdbtt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['tdbtt'] = {0: 'OK'} + + + +def tttai(tt1, tt2): + """ + Wrapper for ERFA function ``eraTttai``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t a i + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to International + Atomic Time, TAI. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tai1,tai2 follow + suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
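``tdbtt`` does not compute the periodic term itself: the caller supplies dtr = TDB-TT (at most about +/-1.7 ms), for example from a time ephemeris or a model such as eraDtdb. The sketch below simply passes an illustrative placeholder value (assumed import path):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    tdb1, tdb2 = 2456000.5, 0.0
    dtr = 0.0012                       # placeholder TDB-TT in seconds
    tt1, tt2 = erfa.tdbtt(tdb1, tdb2, dtr)

    print((tdb2 - tt2) * 86400.0)      # ~0.0012: the supplied dtr is removed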
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['tttai'] = {0: 'OK'} + + + +def tttcg(tt1, tt2): + """ + Wrapper for ERFA function ``eraTttcg``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + + Returns + ------- + tcg1 : double array + tcg2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t c g + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Geocentric + Coordinate Time, TCG. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + + Returned: + tcg1,tcg2 double TCG as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tcg1,tcg2 follow + suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2000 Resolution B1.9 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in) + tcg1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tcg2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, tcg1_out, tcg2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttcg(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttcg') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tcg1_out.shape) > 0 and tcg1_out.shape[0] == 1 + tcg1_out = tcg1_out.reshape(tcg1_out.shape[1:]) + assert len(tcg2_out.shape) > 0 and tcg2_out.shape[0] == 1 + tcg2_out = tcg2_out.reshape(tcg2_out.shape[1:]) + + return tcg1_out, tcg2_out +STATUS_CODES['tttcg'] = {0: 'OK'} + + + +def tttdb(tt1, tt2, dtr): + """ + Wrapper for ERFA function ``eraTttdb``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + dtr : double array + + Returns + ------- + tdb1 : double array + tdb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t d b + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Barycentric + Dynamical Time, TDB. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + dtr double TDB-TT in seconds + + Returned: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tdb1,tdb2 follow + suit. + + 2) The argument dtr represents the quasi-periodic component of the + GR transformation between TT and TCB. It is dependent upon the + adopted solar-system ephemeris, and can be obtained by numerical + integration, by interrogating a precomputed time ephemeris or by + evaluating a model such as that implemented in the ERFA function + eraDtdb. The quantity is dominated by an annual term of 1.7 ms + amplitude. + + 3) TDB is essentially the same as Teph, the time argument for the JPL + solar system ephemerides. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2006 Resolution 3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + dtr_in = numpy.array(dtr, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in, dtr_in) + tdb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tdb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, dtr_in, tdb1_out, tdb2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttdb(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttdb') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tdb1_out.shape) > 0 and tdb1_out.shape[0] == 1 + tdb1_out = tdb1_out.reshape(tdb1_out.shape[1:]) + assert len(tdb2_out.shape) > 0 and tdb2_out.shape[0] == 1 + tdb2_out = tdb2_out.reshape(tdb2_out.shape[1:]) + + return tdb1_out, tdb2_out +STATUS_CODES['tttdb'] = {0: 'OK'} + + + +def ttut1(tt1, tt2, dt): + """ + Wrapper for ERFA function ``eraTtut1``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + dt : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t u t 1 + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Universal Time, + UT1. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + dt double TT-UT1 in seconds + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned ut11,ut12 follow + suit. + + 2) The argument dt is classical Delta T. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
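For ``ttut1`` the caller supplies classical Delta T (TT-UT1), an observed and tabulated quantity. A sketch with a representative modern value (assumed import path; the 68.6 s figure is only illustrative):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    tt1, tt2 = 2457754.5, 0.0          # an arbitrary TT epoch
    dt = 68.6                          # illustrative Delta T = TT-UT1 in seconds
    ut11, ut12 = erfa.ttut1(tt1, tt2, dt)

    print((tt2 - ut12) * 86400.0)      # ~68.6: UT1 lags TT by Delta T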
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + dt_in = numpy.array(dt, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in, dt_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, dt_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ttut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ttut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['ttut1'] = {0: 'OK'} + + + +def ut1tai(ut11, ut12, dta): + """ + Wrapper for ERFA function ``eraUt1tai``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dta : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t 1 t a i + - - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to International + Atomic Time, TAI. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date + dta double UT1-TAI in seconds + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned + tai1,tai2 follow suit. + + 2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is + available from IERS tabulations. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dta_in = numpy.array(dta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dta_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dta_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1tai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1tai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['ut1tai'] = {0: 'OK'} + + + +def ut1tt(ut11, ut12, dt): + """ + Wrapper for ERFA function ``eraUt1tt``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dt : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a U t 1 t t + - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to Terrestrial + Time, TT. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date + dt double TT-UT1 in seconds + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned + tt1,tt2 follow suit. + + 2) The argument dt is classical Delta T. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dt_in = numpy.array(dt, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dt_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dt_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1tt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1tt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['ut1tt'] = {0: 'OK'} + + + +def ut1utc(ut11, ut12, dut1): + """ + Wrapper for ERFA function ``eraUt1utc``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dut1 : double array + + Returns + ------- + utc1 : double array + utc2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t 1 u t c + - - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to Coordinated + Universal Time, UTC. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date (Note 1) + dut1 double Delta UT1: UT1-UTC in seconds (Note 2) + + Returned: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 3,4) + + Returned (function value): + int status: +1 = dubious year (Note 5) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned utc1 + and utc2 form an analogous pair, except that a special convention + is used, to deal with the problem of leap seconds - see Note 3. + + 2) Delta UT1 can be obtained from tabulations provided by the + International Earth Rotation and Reference Systems Service. The + value changes abruptly by 1s at a leap second; however, close to + a leap second the algorithm used here is tolerant of the "wrong" + choice of value being made. + + 3) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the returned quasi JD day UTC1+UTC2 represents + UTC days whether the length is 86399, 86400 or 86401 SI seconds. + + 4) The function eraD2dtf can be used to transform the UTC quasi-JD + into calendar date and clock time, including UTC leap second + handling. + + 5) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. 
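Like taiutc above, ``ut1utc`` can return a non-zero status (+1 dubious year, -1 unacceptable date), so its wrapper below checks the per-element return codes with check_errwarn and registers them in STATUS_CODES. A usage sketch (assumed import path; dut1 is an illustrative DUT1 with |DUT1| < 0.9 s):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    ut11, ut12 = 2457754.5, 0.0
    dut1 = 0.4                          # illustrative UT1-UTC in seconds
    utc1, utc2 = erfa.ut1utc(ut11, ut12, dut1)

    # utc1+utc2 is a quasi Julian Date: a UTC day may be 86399/86400/86401 s long
    print(utc1, utc2)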
+ + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraCal2jd Gregorian calendar to JD + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dut1_in) + utc1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + utc2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dut1_in, utc1_out, utc2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1utc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1utc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(utc1_out.shape) > 0 and utc1_out.shape[0] == 1 + utc1_out = utc1_out.reshape(utc1_out.shape[1:]) + assert len(utc2_out.shape) > 0 and utc2_out.shape[0] == 1 + utc2_out = utc2_out.reshape(utc2_out.shape[1:]) + + return utc1_out, utc2_out +STATUS_CODES['ut1utc'] = {1: 'dubious year (Note 5)', 0: 'OK', -1: 'unacceptable date'} + + + +def utctai(utc1, utc2): + """ + Wrapper for ERFA function ``eraUtctai``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t c t a i + - - - - - - - - - - + + Time scale transformation: Coordinated Universal Time, UTC, to + International Atomic Time, TAI. + + Given: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-4) + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date (Note 5) + + Returned (function value): + int status: +1 = dubious year (Note 3) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. In the 1960-1972 era + there were smaller jumps (in either direction) each time the + linear UTC(TAI) expression was changed, and these "mini-leaps" + are also included in the ERFA convention. 
+ + 3) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 4) The function eraDtf2d converts from calendar date and time of day + into 2-part Julian Date, and in the case of UTC implements the + leap-second-ambiguity convention described above. + + 5) The returned TAI1,TAI2 are such that their sum is the TAI Julian + Date. + + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraCal2jd Gregorian calendar to JD + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._utctai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'utctai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['utctai'] = {1: 'dubious year (Note 3)', 0: 'OK', -1: 'unacceptable date'} + + + +def utcut1(utc1, utc2, dut1): + """ + Wrapper for ERFA function ``eraUtcut1``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t c u t 1 + - - - - - - - - - - + + Time scale transformation: Coordinated Universal Time, UTC, to + Universal Time, UT1. + + Given: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-4) + dut1 double Delta UT1 = UT1-UTC in seconds (Note 5) + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date (Note 6) + + Returned (function value): + int status: +1 = dubious year (Note 3) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. 
The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. + + 3) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 4) The function eraDtf2d converts from calendar date and time of + day into 2-part Julian Date, and in the case of UTC implements + the leap-second-ambiguity convention described above. + + 5) Delta UT1 can be obtained from tabulations provided by the + International Earth Rotation and Reference Systems Service. + It is the caller's responsibility to supply a dut1 argument + containing the UT1-UTC value that matches the given UTC. + + 6) The returned ut11,ut12 are such that their sum is the UT1 Julian + Date. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraUtctai UTC to TAI + eraTaiut1 TAI to UT1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._utcut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'utcut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['utcut1'] = {1: 'dubious year (Note 3)', 0: 'OK', -1: 'unacceptable date'} + + + +def a2af(ndp, angle): + """ + Wrapper for ERFA function ``eraA2af``. + + Parameters + ---------- + ndp : int array + angle : double array + + Returns + ------- + sign : char array + idmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A 2 a f + - - - - - - - - + + Decompose radians into degrees, arcminutes, arcseconds, fraction. 
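``utcut1`` internally chains eraJd2cal/eraDat, eraUtctai and eraTaiut1 (see the Called list above), so only dut1 = UT1-UTC has to be supplied by the caller. A sketch away from any leap-second boundary (assumed import path; the epoch and dut1 are illustrative):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    utc1, utc2 = 2457800.5, 0.0         # an arbitrary 2017 UTC epoch
    dut1 = 0.4                          # illustrative UT1-UTC in seconds
    ut11, ut12 = erfa.utcut1(utc1, utc2, dut1)

    print((ut12 - utc2) * 86400.0)      # ~0.4: UT1 is ahead of UTC by dut1 here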
+ + Given: + ndp int resolution (Note 1) + angle double angle in radians + + Returned: + sign char '+' or '-' + idmsf int[4] degrees, arcminutes, arcseconds, fraction + + Called: + eraD2tf decompose days to hms + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... + + 2) The largest positive useful value for ndp is determined by the + size of angle, the format of doubles on the target platform, and + the risk of overflowing idmsf[3]. On a typical platform, for + angle up to 2pi, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of angle may exceed 2pi. In cases where it + does not, it is up to the caller to test for and handle the + case where angle is very nearly 2pi and rounds up to 360 degrees, + by testing for idmsf[0]=360 and setting idmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + angle_in = numpy.array(angle, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, angle_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + idmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, angle_in, sign_out, idmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._a2af(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(idmsf_out.shape) > 0 and idmsf_out.shape[0] == 1 + idmsf_out = idmsf_out.reshape(idmsf_out.shape[1:]) + + return sign_out, idmsf_out + + +def a2tf(ndp, angle): + """ + Wrapper for ERFA function ``eraA2tf``. + + Parameters + ---------- + ndp : int array + angle : double array + + Returns + ------- + sign : char array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A 2 t f + - - - - - - - - + + Decompose radians into hours, minutes, seconds, fraction. + + Given: + ndp int resolution (Note 1) + angle double angle in radians + + Returned: + sign char '+' or '-' + ihmsf int[4] hours, minutes, seconds, fraction + + Called: + eraD2tf decompose days to hms + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... 
+ + 2) The largest positive useful value for ndp is determined by the + size of angle, the format of doubles on the target platform, and + the risk of overflowing ihmsf[3]. On a typical platform, for + angle up to 2pi, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of angle may exceed 2pi. In cases where it + does not, it is up to the caller to test for and handle the + case where angle is very nearly 2pi and rounds up to 24 hours, + by testing for ihmsf[0]=24 and setting ihmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + angle_in = numpy.array(angle, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, angle_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, angle_in, sign_out, ihmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._a2tf(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return sign_out, ihmsf_out + + +def af2a(s, ideg, iamin, asec): + """ + Wrapper for ERFA function ``eraAf2a``. + + Parameters + ---------- + s : char array + ideg : int array + iamin : int array + asec : double array + + Returns + ------- + rad : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A f 2 a + - - - - - - - - + + Convert degrees, arcminutes, arcseconds to radians. + + Given: + s char sign: '-' = negative, otherwise positive + ideg int degrees + iamin int arcminutes + asec double arcseconds + + Returned: + rad double angle in radians + + Returned (function value): + int status: 0 = OK + 1 = ideg outside range 0-359 + 2 = iamin outside range 0-59 + 3 = asec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ideg, iamin and/or asec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
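A quick sketch of the sexagesimal formatters ``a2af``/``a2tf``: the sign comes back as a one-byte string and the digits as an int array with trailing shape (4,), the last element scaled by the requested resolution ndp (assumed import path; the angles are illustrative):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    sign, idmsf = erfa.a2af(3, 2.0 * np.pi / 3.0)   # 120 degrees at ndp=3
    print(sign)     # b'+'
    print(idmsf)    # [120   0   0   0] -- deg, arcmin, arcsec, 0.001-arcsec units

    sign, ihmsf = erfa.a2tf(2, np.pi)               # 12 hours at ndp=2
    print(ihmsf)    # [12  0  0  0] -- hours, minutes, seconds, 0.01-second units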
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ideg_in = numpy.array(ideg, dtype=numpy.intc, order="C", copy=False, subok=True) + iamin_in = numpy.array(iamin, dtype=numpy.intc, order="C", copy=False, subok=True) + asec_in = numpy.array(asec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ideg_in, iamin_in, asec_in) + rad_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ideg_in, iamin_in, asec_in, rad_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._af2a(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'af2a') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rad_out.shape) > 0 and rad_out.shape[0] == 1 + rad_out = rad_out.reshape(rad_out.shape[1:]) + + return rad_out +STATUS_CODES['af2a'] = {0: 'OK', 1: 'ideg outside range 0-359', 2: 'iamin outside range 0-59', 3: 'asec outside range 0-59.999...'} + + + +def anp(a): + """ + Wrapper for ERFA function ``eraAnp``. + + Parameters + ---------- + a : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a A n p + - - - - - - - + + Normalize angle into the range 0 <= a < 2pi. + + Given: + a double angle (radians) + + Returned (function value): + double angle in range 0-2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._anp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def anpm(a): + """ + Wrapper for ERFA function ``eraAnpm``. + + Parameters + ---------- + a : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A n p m + - - - - - - - - + + Normalize angle into the range -pi <= a < +pi. 
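``af2a`` is the inverse operation; out-of-range fields give the positive statuses listed in STATUS_CODES['af2a'] above, handled by this module's check_errwarn helper (presumably as warnings, since the result is still computed). ``anp`` then folds any angle into [0, 2pi). A sketch (assumed import path):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    rad = erfa.af2a('-', 45, 30, 0.0)   # -45 deg 30'
    print(np.degrees(rad))              # ~-45.5

    wrapped = erfa.anp(rad)             # normalize into [0, 2*pi)
    print(np.degrees(wrapped))          # ~314.5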
+ + Given: + a double angle (radians) + + Returned (function value): + double angle in range +/-pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._anpm(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def d2tf(ndp, days): + """ + Wrapper for ERFA function ``eraD2tf``. + + Parameters + ---------- + ndp : int array + days : double array + + Returns + ------- + sign : char array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a D 2 t f + - - - - - - - - + + Decompose days to hours, minutes, seconds, fraction. + + Given: + ndp int resolution (Note 1) + days double interval in days + + Returned: + sign char '+' or '-' + ihmsf int[4] hours, minutes, seconds, fraction + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... + + 2) The largest positive useful value for ndp is determined by the + size of days, the format of double on the target platform, and + the risk of overflowing ihmsf[3]. On a typical platform, for + days up to 1.0, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of days may exceed 1.0. In cases where it + does not, it is up to the caller to test for and handle the + case where days is very nearly 1.0 and rounds up to 24 hours, + by testing for ihmsf[0]=24 and setting ihmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + days_in = numpy.array(days, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, days_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, days_in, sign_out, ihmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._d2tf(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return sign_out, ihmsf_out + + +def tf2a(s, ihour, imin, sec): + """ + Wrapper for ERFA function ``eraTf2a``. + + Parameters + ---------- + s : char array + ihour : int array + imin : int array + sec : double array + + Returns + ------- + rad : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T f 2 a + - - - - - - - - + + Convert hours, minutes, seconds to radians. + + Given: + s char sign: '-' = negative, otherwise positive + ihour int hours + imin int minutes + sec double seconds + + Returned: + rad double angle in radians + + Returned (function value): + int status: 0 = OK + 1 = ihour outside range 0-23 + 2 = imin outside range 0-59 + 3 = sec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ihour, imin and/or sec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
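``d2tf`` is the day-interval analogue that both a2af and a2tf delegate to (see their Called lists); the sign convention and the ndp scaling are the same. A sketch (assumed import path):

    import numpy as np
    from astropy import _erfa as erfa   # assumed import path

    sign, ihmsf = erfa.d2tf(2, 0.75)    # three quarters of a day at ndp=2
    print(sign)     # b'+'
    print(ihmsf)    # [18  0  0  0] -- 0.75 day is exactly 18h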
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ihour_in = numpy.array(ihour, dtype=numpy.intc, order="C", copy=False, subok=True) + imin_in = numpy.array(imin, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ihour_in, imin_in, sec_in) + rad_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ihour_in, imin_in, sec_in, rad_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tf2a(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tf2a') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rad_out.shape) > 0 and rad_out.shape[0] == 1 + rad_out = rad_out.reshape(rad_out.shape[1:]) + + return rad_out +STATUS_CODES['tf2a'] = {0: 'OK', 1: 'ihour outside range 0-23', 2: 'imin outside range 0-59', 3: 'sec outside range 0-59.999...'} + + + +def tf2d(s, ihour, imin, sec): + """ + Wrapper for ERFA function ``eraTf2d``. + + Parameters + ---------- + s : char array + ihour : int array + imin : int array + sec : double array + + Returns + ------- + days : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T f 2 d + - - - - - - - - + + Convert hours, minutes, seconds to days. + + Given: + s char sign: '-' = negative, otherwise positive + ihour int hours + imin int minutes + sec double seconds + + Returned: + days double interval in days + + Returned (function value): + int status: 0 = OK + 1 = ihour outside range 0-23 + 2 = imin outside range 0-59 + 3 = sec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ihour, imin and/or sec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ihour_in = numpy.array(ihour, dtype=numpy.intc, order="C", copy=False, subok=True) + imin_in = numpy.array(imin, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ihour_in, imin_in, sec_in) + days_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ihour_in, imin_in, sec_in, days_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tf2d(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tf2d') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(days_out.shape) > 0 and days_out.shape[0] == 1 + days_out = days_out.reshape(days_out.shape[1:]) + + return days_out +STATUS_CODES['tf2d'] = {0: 'OK', 1: 'ihour outside range 0-23', 2: 'imin outside range 0-59', 3: 'sec outside range 0-59.999...'} + + + +def rxp(r, p): + """ + Wrapper for ERFA function ``eraRxp``. + + Parameters + ---------- + r : double array + p : double array + + Returns + ------- + rp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a R x p + - - - - - - - + + Multiply a p-vector by an r-matrix. + + Given: + r double[3][3] r-matrix + p double[3] p-vector + + Returned: + rp double[3] r * p + + Note: + It is permissible for p and rp to be the same array. + + Called: + eraCp copy p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], p_in[...,0]) + rp_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], p_in[...,0], rp_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._rxp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + + return rp_out + + +def rxpv(r, pv): + """ + Wrapper for ERFA function ``eraRxpv``. 
+ + Parameters + ---------- + r : double array + pv : double array + + Returns + ------- + rpv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a R x p v + - - - - - - - - + + Multiply a pv-vector by an r-matrix. + + Given: + r double[3][3] r-matrix + pv double[2][3] pv-vector + + Returned: + rpv double[2][3] r * pv + + Note: + It is permissible for pv and rpv to be the same array. + + Called: + eraRxp product of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], pv_in[...,0,0]) + rpv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], pv_in[...,0,0], rpv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._rxpv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpv_out.shape) > 0 and rpv_out.shape[0] == 1 + rpv_out = rpv_out.reshape(rpv_out.shape[1:]) + + return rpv_out + + +def trxp(r, p): + """ + Wrapper for ERFA function ``eraTrxp``. + + Parameters + ---------- + r : double array + p : double array + + Returns + ------- + trp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T r x p + - - - - - - - - + + Multiply a p-vector by the transpose of an r-matrix. + + Given: + r double[3][3] r-matrix + p double[3] p-vector + + Returned: + trp double[3] r * p + + Note: + It is permissible for p and trp to be the same array. + + Called: + eraTr transpose r-matrix + eraRxp product of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
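+
+    Illustrative property (an editorial sketch, not part of the upstream
+    documentation): for an orthogonal rotation matrix the transpose is the
+    inverse, so applying rxp and then trxp with the same matrix recovers the
+    original vector:
+
+        r = [[0.0, 1.0, 0.0],
+             [-1.0, 0.0, 0.0],
+             [0.0, 0.0, 1.0]]        # rotation by 90 degrees about the z-axis
+        p = [1.0, 2.0, 3.0]
+        trxp(r, rxp(r, p))           # -> approximately [1.0, 2.0, 3.0]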
+ + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], p_in[...,0]) + trp_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], p_in[...,0], trp_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._trxp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(trp_out.shape) > 0 and trp_out.shape[0] == 1 + trp_out = trp_out.reshape(trp_out.shape[1:]) + + return trp_out + + +def trxpv(r, pv): + """ + Wrapper for ERFA function ``eraTrxpv``. + + Parameters + ---------- + r : double array + pv : double array + + Returns + ------- + trpv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T r x p v + - - - - - - - - - + + Multiply a pv-vector by the transpose of an r-matrix. + + Given: + r double[3][3] r-matrix + pv double[2][3] pv-vector + + Returned: + trpv double[2][3] r * pv + + Note: + It is permissible for pv and trpv to be the same array. + + Called: + eraTr transpose r-matrix + eraRxpv product of r-matrix and pv-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], pv_in[...,0,0]) + trpv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], pv_in[...,0,0], trpv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._trxpv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(trpv_out.shape) > 0 and trpv_out.shape[0] == 1 + trpv_out = trpv_out.reshape(trpv_out.shape[1:]) + + return trpv_out + + +def c2s(p): + """ + Wrapper for ERFA function ``eraC2s``. + + Parameters + ---------- + p : double array + + Returns + ------- + theta : double array + phi : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a C 2 s + - - - - - - - + + P-vector to spherical coordinates. 
+ + Given: + p double[3] p-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + + Notes: + + 1) The vector p can have any magnitude; only its direction is used. + + 2) If p is null, zero theta and phi are returned. + + 3) At either pole, zero theta is returned. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], theta_out, phi_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + + return theta_out, phi_out + + +def p2s(p): + """ + Wrapper for ERFA function ``eraP2s``. + + Parameters + ---------- + p : double array + + Returns + ------- + theta : double array + phi : double array + r : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a P 2 s + - - - - - - - + + P-vector to spherical polar coordinates. + + Given: + p double[3] p-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + + Notes: + + 1) If P is null, zero theta, phi and r are returned. + + 2) At either pole, zero theta is returned. + + Called: + eraC2s p-vector to spherical + eraPm modulus of p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
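+
+    Illustrative usage (an editorial sketch, not part of the upstream ERFA
+    documentation; the values mirror the bundled test suite):
+
+        theta, phi, r = p2s([0.0, numpy.sqrt(2.0), numpy.sqrt(2.0)])
+        # theta -> pi/2, phi -> pi/4, r -> 2.0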
+ + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + r_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], theta_out, phi_out, r_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._p2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + + return theta_out, phi_out, r_out + + +def pv2s(pv): + """ + Wrapper for ERFA function ``eraPv2s``. + + Parameters + ---------- + pv : double array + + Returns + ------- + theta : double array + phi : double array + r : double array + td : double array + pd : double array + rd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P v 2 s + - - - - - - - - + + Convert position/velocity from Cartesian to spherical coordinates. + + Given: + pv double[2][3] pv-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + td double rate of change of theta + pd double rate of change of phi + rd double rate of change of r + + Notes: + + 1) If the position part of pv is null, theta, phi, td and pd + are indeterminate. This is handled by extrapolating the + position through unit time by using the velocity part of + pv. This moves the origin without changing the direction + of the velocity component. If the position and velocity + components of pv are both null, zeroes are returned for all + six results. + + 2) If the position is a pole, theta, td and pd are indeterminate. + In such cases zeroes are returned for all three. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pv_in[...,0,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + r_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + td_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pv_in[...,0,0], theta_out, phi_out, r_out, td_out, pd_out, rd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pv2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + assert len(td_out.shape) > 0 and td_out.shape[0] == 1 + td_out = td_out.reshape(td_out.shape[1:]) + assert len(pd_out.shape) > 0 and pd_out.shape[0] == 1 + pd_out = pd_out.reshape(pd_out.shape[1:]) + assert len(rd_out.shape) > 0 and rd_out.shape[0] == 1 + rd_out = rd_out.reshape(rd_out.shape[1:]) + + return theta_out, phi_out, r_out, td_out, pd_out, rd_out + + +def s2c(theta, phi): + """ + Wrapper for ERFA function ``eraS2c``. + + Parameters + ---------- + theta : double array + phi : double array + + Returns + ------- + c : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 2 c + - - - - - - - + + Convert spherical coordinates to Cartesian. + + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + + Returned: + c double[3] direction cosines + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in) + c_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, c_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2c(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_out.shape) > 0 and c_out.shape[0] == 1 + c_out = c_out.reshape(c_out.shape[1:]) + + return c_out + + +def s2p(theta, phi, r): + """ + Wrapper for ERFA function ``eraS2p``. + + Parameters + ---------- + theta : double array + phi : double array + r : double array + + Returns + ------- + p : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 2 p + - - - - - - - + + Convert spherical polar coordinates to p-vector. + + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + + Returned: + p double[3] Cartesian coordinates + + Called: + eraS2c spherical coordinates to unit vector + eraSxp multiply p-vector by scalar + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in, r_in) + p_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, r_in, p_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2p(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p_out.shape) > 0 and p_out.shape[0] == 1 + p_out = p_out.reshape(p_out.shape[1:]) + + return p_out + + +def s2pv(theta, phi, r, td, pd, rd): + """ + Wrapper for ERFA function ``eraS2pv``. + + Parameters + ---------- + theta : double array + phi : double array + r : double array + td : double array + pd : double array + rd : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 2 p v + - - - - - - - - + + Convert position/velocity from spherical to Cartesian coordinates. 
+ + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + td double rate of change of theta + pd double rate of change of phi + rd double rate of change of r + + Returned: + pv double[2][3] pv-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + td_in = numpy.array(td, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + rd_in = numpy.array(rd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in, r_in, td_in, pd_in, rd_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, r_in, td_in, pd_in, rd_in, pv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2pv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out + + + + +# TODO: delete the functions below when they can get auto-generated +# (current machinery doesn't support returning strings or non-status-codes) +def version(): + """ + Returns the package version + as defined in configure.ac + in string format + """ + return "1.4.0" + +def version_major(): + """ + Returns the package major version + as defined in configure.ac + as integer + """ + return 1 + +def version_minor(): + """ + Returns the package minor version + as defined in configure.ac + as integer + """ + return 4 + +def version_micro(): + """ + Returns the package micro version + as defined in configure.ac + as integer + """ + return 0 + +def sofa_version(): + """ + Returns the corresponding SOFA version + as defined in configure.ac + in string format + """ + return "20170420" \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..040ec47ef9eafc4fe634f09001c58f45bee16e51 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/core.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..20e5ab42466fa1380d2bbdfd6850ad4bec293ff3 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.py @@ -0,0 +1,563 @@ +# Licensed under a 3-clause BSD style license - see 
LICENSE.rst +""" +This module's main purpose is to act as a script to create new versions +of erfa.c when ERFA is updated (or this generator is enhanced). + +`Jinja2 `_ must be installed for this +module/script to function. + +Note that this does *not* currently automate the process of creating structs +or dtypes for those structs. They should be added manually in the template file. +""" +from __future__ import absolute_import, division, print_function +# note that we do *not* use unicode_literals here, because that makes the +# generated code's strings have u'' in them on py 2.x + +import re +import os.path +from collections import OrderedDict + + +ctype_to_dtype = {'double': "numpy.double", + 'int': "numpy.intc", + 'eraASTROM': "dt_eraASTROM", + 'eraLDBODY': "dt_eraLDBODY", + 'char': "numpy.dtype('S1')", + 'const char': "numpy.dtype('S16')", + } + + +NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')')) + + +class FunctionDoc(object): + + def __init__(self, doc): + self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "") + self.__input = None + self.__output = None + self.__ret_info = None + + @property + def input(self): + if self.__input is None: + self.__input = [] + result = re.search("Given([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __input = result.group(2) + for i in __input.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__input.append(arg_doc) + result = re.search("Given and returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __input = result.group(2) + for i in __input.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__input.append(arg_doc) + return self.__input + + @property + def output(self): + if self.__output is None: + self.__output = [] + result = re.search("Returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __output = result.group(2) + for i in __output.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__output.append(arg_doc) + result = re.search("Given and returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __output = result.group(2) + for i in __output.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__output.append(arg_doc) + return self.__output + + @property + def ret_info(self): + if self.__ret_info is None: + ret_info = [] + result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) 
\n", self.doc, re.DOTALL) + if result is not None: + ret_info.append(ReturnDoc(result.group(2))) + + if len(ret_info) == 0: + self.__ret_info = '' + elif len(ret_info) == 1: + self.__ret_info = ret_info[0] + else: + raise ValueError("Multiple C return sections found in this doc:\n" + self.doc) + + return self.__ret_info + + def __repr__(self): + return self.doc.replace(" \n", "\n") + + +class ArgumentDoc(object): + + def __init__(self, doc): + match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc) + if match is not None: + self.name = match.group(1) + self.type = match.group(2) + self.doc = match.group(3) + else: + self.name = None + self.type = None + self.doc = None + + def __repr__(self): + return " {0:15} {1:15} {2}".format(self.name, self.type, self.doc) + + +class Argument(object): + + def __init__(self, definition, doc): + self.doc = doc + self.__inout_state = None + self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1) + if "*" == ptr_name_arr[0]: + self.is_ptr = True + name_arr = ptr_name_arr[1:] + else: + self.is_ptr = False + name_arr = ptr_name_arr + if "[]" in ptr_name_arr: + self.is_ptr = True + name_arr = name_arr[:-2] + if "[" in name_arr: + self.name, arr = name_arr.split("[", 1) + self.shape = tuple([int(size) for size in arr[:-1].split("][")]) + else: + self.name = name_arr + self.shape = () + + @property + def inout_state(self): + if self.__inout_state is None: + self.__inout_state = '' + for i in self.doc.input: + if self.name in i.name.split(','): + self.__inout_state = 'in' + for o in self.doc.output: + if self.name in o.name.split(','): + if self.__inout_state == 'in': + self.__inout_state = 'inout' + else: + self.__inout_state = 'out' + return self.__inout_state + + @property + def ctype_ptr(self): + if (self.is_ptr) | (len(self.shape) > 0): + return self.ctype+" *" + else: + return self.ctype + + @property + def name_in_broadcast(self): + if len(self.shape) > 0: + return "{0}_in[...{1}]".format(self.name, ",0"*len(self.shape)) + else: + return "{0}_in".format(self.name) + + @property + def name_out_broadcast(self): + if len(self.shape) > 0: + return "{0}_out[...{1}]".format(self.name, ",0"*len(self.shape)) + else: + return "{0}_out".format(self.name) + + @property + def dtype(self): + return ctype_to_dtype[self.ctype] + + @property + def ndim(self): + return len(self.shape) + + @property + def cshape(self): + return ''.join(['[{0}]'.format(s) for s in self.shape]) + + @property + def name_for_call(self): + if self.is_ptr: + return '_'+self.name + else: + return '*_'+self.name + + def __repr__(self): + return "Argument('{0}', name='{1}', ctype='{2}', inout_state='{3}')".format(self.definition, self.name, self.ctype, self.inout_state) + + +class ReturnDoc(object): + + def __init__(self, doc): + self.doc = doc + + self.infoline = doc.split('\n')[0].strip() + self.type = self.infoline.split()[0] + self.descr = self.infoline.split()[1] + + if self.descr.startswith('status'): + self.statuscodes = statuscodes = {} + + code = None + for line in doc[doc.index(':')+1:].split('\n'): + ls = line.strip() + if ls != '': + if ' = ' in ls: + code, msg = ls.split(' = ') + if code != 'else': + code = int(code) + statuscodes[code] = msg + elif code is not None: + statuscodes[code] += ls + else: + self.statuscodes = None + + def __repr__(self): + return "Return value, type={0:15}, {1}, {2}".format(self.type, self.descr, self.doc) + + +class Return(object): + + def __init__(self, ctype, doc): + self.name = 'c_retval' + self.name_out_broadcast = self.name+"_out" + 
self.inout_state = 'stat' if ctype == 'int' else 'ret' + self.ctype = ctype + self.ctype_ptr = ctype + self.shape = () + self.doc = doc + + def __repr__(self): + return "Return(name='{0}', ctype='{1}', inout_state='{2}')".format(self.name, self.ctype, self.inout_state) + + @property + def dtype(self): + return ctype_to_dtype[self.ctype] + + @property + def nd_dtype(self): + """ + This if the return type has a multi-dimensional output, like + double[3][3] + """ + return "'fi0'" in self.dtype + + @property + def doc_info(self): + return self.doc.ret_info + + +class Function(object): + """ + A class representing a C function. + + Parameters + ---------- + name : str + The name of the function + source_path : str + Either a directory, which means look for the function in a + stand-alone file (like for the standard ERFA distribution), or a + file, which means look for the function in that file (as for the + astropy-packaged single-file erfa.c). + match_line : str, optional + If given, searching of the source file will skip until it finds + a line matching this string, and start from there. + """ + + def __init__(self, name, source_path, match_line=None): + self.name = name + self.pyname = name.split('era')[-1].lower() + self.filename = self.pyname+".c" + if os.path.isdir(source_path): + self.filepath = os.path.join(os.path.normpath(source_path), self.filename) + else: + self.filepath = source_path + + with open(self.filepath) as f: + if match_line: + line = f.readline() + while line != '': + if line.startswith(match_line): + filecontents = '\n' + line + f.read() + break + line = f.readline() + else: + msg = ('Could not find the match_line "{0}" in ' + 'the source file "{1}"') + raise ValueError(msg.format(match_line, self.filepath)) + else: + filecontents = f.read() + + pattern = r"\n([^\n]+{0} ?\([^)]+\)).+?(/\*.+?\*/)".format(name) + p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE) + + search = p.search(filecontents) + self.cfunc = " ".join(search.group(1).split()) + self.doc = FunctionDoc(search.group(2)) + + self.args = [] + for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '): + self.args.append(Argument(arg, self.doc)) + self.ret = re.search("^(.*){0}".format(name), self.cfunc).group(1).strip() + if self.ret != 'void': + self.args.append(Return(self.ret, self.doc)) + + def args_by_inout(self, inout_filter, prop=None, join=None): + """ + Gives all of the arguments and/or returned values, depending on whether + they are inputs, outputs, etc. + + The value for `inout_filter` should be a string containing anything + that arguments' `inout_state` attribute produces. Currently, that can be: + + * "in" : input + * "out" : output + * "inout" : something that's could be input or output (e.g. a struct) + * "ret" : the return value of the C function + * "stat" : the return value of the C function if it is a status code + + It can also be a "|"-separated string giving inout states to OR + together. 
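+
+        For example (an illustrative, hypothetical call; ``func`` is assumed
+        to be a ``Function`` instance parsed from the ERFA sources):
+
+            func.args_by_inout('in|inout', prop='name', join=', ')
+            # -> one comma-separated string of the input argument names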
+ """ + result = [] + for arg in self.args: + if arg.inout_state in inout_filter.split('|'): + if prop is None: + result.append(arg) + else: + result.append(getattr(arg, prop)) + if join is not None: + return join.join(result) + else: + return result + + def __repr__(self): + return "Function(name='{0}', pyname='{1}', filename='{2}', filepath='{3}')".format(self.name, self.pyname, self.filename, self.filepath) + + +class Constant(object): + + def __init__(self, name, value, doc): + self.name = name.replace("ERFA_", "") + self.value = value.replace("ERFA_", "") + self.doc = doc + + +class ExtraFunction(Function): + """ + An "extra" function - e.g. one not following the SOFA/ERFA standard format. + + Parameters + ---------- + cname : str + The name of the function in C + prototype : str + The prototype for the function (usually derived from the header) + pathfordoc : str + The path to a file that contains the prototype, with the documentation + as a multiline string *before* it. + """ + + def __init__(self, cname, prototype, pathfordoc): + self.name = cname + self.pyname = cname.split('era')[-1].lower() + self.filepath, self.filename = os.path.split(pathfordoc) + + self.prototype = prototype.strip() + if prototype.endswith('{') or prototype.endswith(';'): + self.prototype = prototype[:-1].strip() + + incomment = False + lastcomment = None + with open(pathfordoc, 'r') as f: + for l in f: + if incomment: + if l.lstrip().startswith('*/'): + incomment = False + lastcomment = ''.join(lastcomment) + else: + if l.startswith('**'): + l = l[2:] + lastcomment.append(l) + else: + if l.lstrip().startswith('/*'): + incomment = True + lastcomment = [] + if l.startswith(self.prototype): + self.doc = lastcomment + break + else: + raise ValueError('Did not find prototype {} in file ' + '{}'.format(self.prototype, pathfordoc)) + + self.args = [] + argset = re.search(r"{0}\(([^)]+)?\)".format(self.name), + self.prototype).group(1) + if argset is not None: + for arg in argset.split(', '): + self.args.append(Argument(arg, self.doc)) + self.ret = re.match("^(.*){0}".format(self.name), + self.prototype).group(1).strip() + if self.ret != 'void': + self.args.append(Return(self.ret, self.doc)) + + def __repr__(self): + r = super(ExtraFunction, self).__repr__() + if r.startswith('Function'): + r = 'Extra' + r + return r + + +def main(srcdir, outfn, templateloc, verbose=True): + from jinja2 import Environment, FileSystemLoader + + if verbose: + print_ = lambda *args, **kwargs: print(*args, **kwargs) + else: + print_ = lambda *args, **kwargs: None + + # Prepare the jinja2 templating environment + env = Environment(loader=FileSystemLoader(templateloc)) + + def prefix(a_list, pre): + return [pre+'{0}'.format(an_element) for an_element in a_list] + + def postfix(a_list, post): + return ['{0}'.format(an_element)+post for an_element in a_list] + + def surround(a_list, pre, post): + return [pre+'{0}'.format(an_element)+post for an_element in a_list] + env.filters['prefix'] = prefix + env.filters['postfix'] = postfix + env.filters['surround'] = surround + + erfa_c_in = env.get_template('core.c.templ') + erfa_py_in = env.get_template('core.py.templ') + + # Extract all the ERFA function names from erfa.h + if os.path.isdir(srcdir): + erfahfn = os.path.join(srcdir, 'erfa.h') + multifilserc = True + else: + erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h') + multifilserc = False + + with open(erfahfn, "r") as f: + erfa_h = f.read() + + funcs = OrderedDict() + section_subsection_functions = re.findall(r'/\* (\w*)/(\w*) 
\*/\n(.*?)\n\n', + erfa_h, flags=re.DOTALL | re.MULTILINE) + for section, subsection, functions in section_subsection_functions: + print_("{0}.{1}".format(section, subsection)) + if ((section == "Astronomy") or (subsection == "AngleOps") + or (subsection == "SphericalCartesian") + or (subsection == "MatrixVectorProducts")): + func_names = re.findall(r' (\w+)\(.*?\);', functions, flags=re.DOTALL) + for name in func_names: + print_("{0}.{1}.{2}...".format(section, subsection, name)) + if multifilserc: + # easy because it just looks in the file itself + funcs[name] = Function(name, srcdir) + else: + # Have to tell it to look for a declaration matching + # the start of the header declaration, otherwise it + # might find a *call* of the function instead of the + # definition + for line in functions.split(r'\n'): + if name in line: + # [:-1] is to remove trailing semicolon, and + # splitting on '(' is because the header and + # C files don't necessarily have to match + # argument names and line-breaking or + # whitespace + match_line = line[:-1].split('(')[0] + funcs[name] = Function(name, srcdir, match_line) + break + else: + raise ValueError("A name for a C file wasn't " + "found in the string that " + "spawned it. This should be " + "impossible!") + + funcs = list(funcs.values()) + + # Extract all the ERFA constants from erfam.h + erfamhfn = os.path.join(srcdir, 'erfam.h') + with open(erfamhfn, 'r') as f: + erfa_m_h = f.read() + constants = [] + for chunk in erfa_m_h.split("\n\n"): + result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk, flags=re.DOTALL | re.MULTILINE) + if result: + doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL) + for (name, value) in result: + constants.append(Constant(name, value, doc)) + + # TODO: re-enable this when const char* return values and non-status code integer rets are possible + # #Add in any "extra" functions from erfaextra.h + # erfaextrahfn = os.path.join(srcdir, 'erfaextra.h') + # with open(erfaextrahfn, 'r') as f: + # for l in f: + # ls = l.strip() + # match = re.match('.* (era.*)\(', ls) + # if match: + # print_("Extra: {0} ...".format(match.group(1))) + # funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn)) + + print_("Rendering template") + erfa_c = erfa_c_in.render(funcs=funcs) + erfa_py = erfa_py_in.render(funcs=funcs, constants=constants) + + if outfn is not None: + outfn_c = os.path.splitext(outfn)[0] + ".c" + print_("Saving to", outfn, 'and', outfn_c) + with open(outfn, "w") as f: + f.write(erfa_py) + with open(outfn_c, "w") as f: + f.write(erfa_c) + + print_("Done!") + + return erfa_c, erfa_py, funcs + + +DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0], + '../../cextern/erfa') +DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0] + +if __name__ == '__main__': + from argparse import ArgumentParser + + ap = ArgumentParser() + ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?', + help='Directory where the ERFA c and header files ' + 'can be found or to a single erfa.c file ' + '(which must be in the same directory as ' + 'erfa.h). Defaults to the builtin astropy ' + 'erfa: "{0}"'.format(DEFAULT_ERFA_LOC)) + ap.add_argument('-o', '--output', default='core.py', + help='The output filename. 
This is the name for only the ' + 'pure-python output, the C part will have the ' + 'same name but with a ".c" extension.') + ap.add_argument('-t', '--template-loc', + default=DEFAULT_TEMPLATE_LOC, + help='the location where the "core.c.templ" ' + 'template can be found.') + ap.add_argument('-q', '--quiet', action='store_false', dest='verbose', + help='Suppress output normally printed to stdout.') + + args = ap.parse_args() + main(args.srcdir, args.output, args.template_loc) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8c482ba795e02d1d51ef3ab3285978e4ebc54a0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/erfa_generator.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..d609d1623056cd3231ff460016e83ad25a596215 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.py @@ -0,0 +1,118 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import + +import os +import glob + +from distutils import log +from distutils.extension import Extension + +from astropy_helpers import setup_helpers +from astropy_helpers.version_helpers import get_pkg_version_module + +ERFAPKGDIR = os.path.relpath(os.path.dirname(__file__)) + +ERFA_SRC = os.path.abspath(os.path.join(ERFAPKGDIR, '..', '..', 'cextern', 'erfa')) + +SRC_FILES = glob.glob(os.path.join(ERFA_SRC, '*')) +SRC_FILES += [os.path.join(ERFAPKGDIR, filename) + for filename in ['core.py.templ', 'core.c.templ', 'erfa_generator.py']] + +GEN_FILES = [os.path.join(ERFAPKGDIR, 'core.py'), os.path.join(ERFAPKGDIR, 'core.c')] + + +def pre_build_py_hook(cmd_obj): + preprocess_source() + + +def pre_build_ext_hook(cmd_obj): + preprocess_source() + + +def pre_sdist_hook(cmd_obj): + preprocess_source() + + +def preprocess_source(): + # Generating the ERFA wrappers should only be done if needed. This also + # ensures that it is not done for any release tarball since those will + # include core.py and core.c. + if all(os.path.exists(filename) for filename in GEN_FILES): + + # Determine modification times + erfa_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES) + gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES) + + version = get_pkg_version_module('astropy') + + if gen_mtime > erfa_mtime: + # If generated source is recent enough, don't update + return + elif version.release: + # or, if we're on a release, issue a warning, but go ahead and use + # the wrappers anyway + log.warn('WARNING: The autogenerated wrappers in astropy._erfa ' + 'seem to be older than the source templates used to ' + 'create them. Because this is a release version we will ' + 'use them anyway, but this might be a sign of some sort ' + 'of version mismatch or other tampering. 
Or it might just ' + 'mean you moved some files around or otherwise ' + 'accidentally changed timestamps.') + return + # otherwise rebuild the autogenerated files + + # If jinja2 isn't present, then print a warning and use existing files + try: + import jinja2 # pylint: disable=W0611 + except ImportError: + log.warn("WARNING: jinja2 could not be imported, so the existing " + "ERFA core.py and core.c files will be used") + return + + name = 'erfa_generator' + filename = os.path.join(ERFAPKGDIR, 'erfa_generator.py') + + try: + from importlib import machinery as import_machinery + loader = import_machinery.SourceFileLoader(name, filename) + gen = loader.load_module() + except ImportError: + import imp + gen = imp.load_source(name, filename) + + gen.main(gen.DEFAULT_ERFA_LOC, + os.path.join(ERFAPKGDIR, 'core.py'), + gen.DEFAULT_TEMPLATE_LOC, + verbose=False) + + +def get_extensions(): + sources = [os.path.join(ERFAPKGDIR, "core.c")] + include_dirs = ['numpy'] + libraries = [] + + if setup_helpers.use_system_library('erfa'): + libraries.append('erfa') + else: + # get all of the .c files in the cextern/erfa directory + erfafns = os.listdir(ERFA_SRC) + sources.extend(['cextern/erfa/'+fn for fn in erfafns if fn.endswith('.c')]) + + include_dirs.append('cextern/erfa') + + erfa_ext = Extension( + name="astropy._erfa._core", + sources=sources, + include_dirs=include_dirs, + libraries=libraries, + language="c",) + + return [erfa_ext] + + +def get_external_libraries(): + return ['erfa'] + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7826ef0de9a5e3aa103e7da78175b863afe1d0a4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9dce85d06f83264a6e502be0906d86bd976e1677 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.py @@ -0,0 +1 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..004060a8a0717c97017dfe115bddcb042a8261aa Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.py new file mode 100644 index 0000000000000000000000000000000000000000..493d54a7063f77d57043dc49c83181cefb724641 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.py @@ -0,0 +1,233 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +import numpy as np +from .. 
import core as erfa +from ...tests.helper import catch_warnings + + +def test_erfa_wrapper(): + """ + Runs a set of tests that mostly make sure vectorization is + working as expected + """ + + jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1) + ra = np.linspace(0.0, np.pi*2.0, 5) + dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4) + + aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd, 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + assert aob.shape == (121,) + + aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd[0], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + assert aob.shape == () + + aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None], dec[None, :, None], 0.0, 0.0, 0.0, 0.0, jd[None, None, :], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + (aob.shape) == (5, 4, 121) + + iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0) + assert iy.shape == (121,) + assert ihmsf.shape == (121, 4) + assert ihmsf.dtype == np.dtype('i4') + + iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0) + assert iy.shape == () + assert ihmsf.shape == (4,) + assert ihmsf.dtype == np.dtype('i4') + + +def test_angle_ops(): + + sign, idmsf = erfa.a2af(6, -np.pi) + assert sign == b'-' + assert (idmsf == [180, 0, 0, 0]).all() + + sign, ihmsf = erfa.a2tf(6, np.pi) + assert sign == b'+' + assert (ihmsf == [12, 0, 0, 0]).all() + + rad = erfa.af2a('-', 180, 0, 0.0) + np.testing.assert_allclose(rad, -np.pi) + + rad = erfa.tf2a('+', 12, 0, 0.0) + np.testing.assert_allclose(rad, np.pi) + + rad = erfa.anp(3.*np.pi) + np.testing.assert_allclose(rad, np.pi) + + rad = erfa.anpm(3.*np.pi) + np.testing.assert_allclose(rad, -np.pi) + + sign, ihmsf = erfa.d2tf(1, -1.5) + assert sign == b'-' + assert (ihmsf == [36, 0, 0, 0]).all() + + days = erfa.tf2d('+', 3, 0, 0.0) + np.testing.assert_allclose(days, 0.125) + + +def test_spherical_cartesian(): + + theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + + theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + np.testing.assert_allclose(r, 2.0) + + theta, phi, r, td, pd, rd = erfa.pv2s([[0.0, np.sqrt(2.0), np.sqrt(2.0)], [1.0, 0.0, 0.0]]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + np.testing.assert_allclose(r, 2.0) + np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0) + np.testing.assert_allclose(pd, 0.0) + np.testing.assert_allclose(rd, 0.0) + + c = erfa.s2c(np.pi/2.0, np.pi/4.0) + np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14) + + c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0) + np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14) + + pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0) + np.testing.assert_allclose(pv, [[0.0, np.sqrt(2.0), np.sqrt(2.0)], [-1.0, 0.0, 0.0]], atol=1e-14) + + +def test_errwarn_reporting(): + """ + Test that the ERFA error reporting mechanism works as it should + """ + + # no warning + erfa.dat(1990, 1, 1, 0.5) + + # check warning is raised for a scalar + with catch_warnings() as w: + erfa.dat(100, 1, 1, 0.5) + assert len(w) == 1 + assert w[0].category == erfa.ErfaWarning + assert '1 of "dubious year (Note 1)"' in str(w[0].message) + + # and that the count is right for a vector. 
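+    # (two of the three years passed below are "dubious", so the single
+    # aggregated ErfaWarning is expected to report a count of 2)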
+ with catch_warnings() as w: + erfa.dat([100, 200, 1990], 1, 1, 0.5) + assert len(w) == 1 + assert w[0].category == erfa.ErfaWarning + assert '2 of "dubious year (Note 1)"' in str(w[0].message) + + try: + erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5) + except erfa.ErfaError as e: + if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]: + assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0] + + try: + erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5) + except erfa.ErfaError as e: + if 'warning' in e.args[0]: + assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0] + + +def test_vector_inouts(): + """ + Tests that ERFA functions working with vectors are correctly consumed and spit out + """ + + # values are from test_erfa.c t_ab function + pnat = [-0.76321968546737951, + -0.60869453983060384, + -0.21676408580639883] + v = [2.1044018893653786e-5, + -8.9108923304429319e-5, + -3.8633714797716569e-5] + s = 0.99980921395708788 + bm1 = 0.99999999506209258 + + expected = [-0.7631631094219556269, + -0.6087553082505590832, + -0.2167926269368471279] + + res = erfa.ab(pnat, v, s, bm1) + assert res.shape == (3,) + + np.testing.assert_allclose(res, expected) + + res2 = erfa.ab([pnat]*4, v, s, bm1) + assert res2.shape == (4, 3) + np.testing.assert_allclose(res2, [expected]*4) + + # here we stride an array and also do it Fortran-order to make sure + # it all still works correctly with non-contig arrays + pnata = np.array(pnat) + arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F') + res3 = erfa.ab(arrin[::5], v, s, bm1) + assert res3.shape == (4, 3) + np.testing.assert_allclose(res3, [expected]*4) + + +def test_matrix_in(): + jd1 = 2456165.5 + jd2 = 0.401182685 + + pvmat = np.empty((2, 3)) + pvmat[0][0] = -6241497.16 + pvmat[0][1] = 401346.896 + pvmat[0][2] = -1251136.04 + pvmat[1][0] = -29.264597 + pvmat[1][1] = -455.021831 + pvmat[1][2] = 0.0266151194 + + astrom = erfa.apcs13(jd1, jd2, pvmat) + assert astrom.shape == () + + # values from t_erfa_c + np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508) + np.testing.assert_allclose(astrom['em'], 1.010428384373318379) + np.testing.assert_allclose(astrom['eb'], [.9012691529023298391, + -.4173999812023068781, + -.1809906511146821008]) + np.testing.assert_allclose(astrom['bpn'], np.eye(3)) + + # first make sure it *fails* if we mess with the input orders + pvmatbad = np.roll(pvmat.ravel(), 1).reshape((2, 3)) + astrombad = erfa.apcs13(jd1, jd2, pvmatbad) + assert not np.allclose(astrombad['em'], 1.010428384373318379) + + pvmatarr = np.array([pvmat]*3) + astrom2 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom2.shape == (3,) + np.testing.assert_allclose(astrom2['em'], 1.010428384373318379) + + # try striding of the input array to make non-contiguous + pvmatarr = np.array([pvmat]*9)[::3] + astrom3 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom3.shape == (3,) + np.testing.assert_allclose(astrom3['em'], 1.010428384373318379) + + # try fortran-order + pvmatarr = np.array([pvmat]*3, order='F') + astrom4 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom4.shape == (3,) + np.testing.assert_allclose(astrom4['em'], 1.010428384373318379) + + +def test_structs(): + """ + Checks producing and consuming of ERFA c structs + """ + + am, eo = erfa.apci13(2456165.5, [0.401182685, 1]) + assert am.shape == (2, ) + assert am.dtype == erfa.dt_eraASTROM + assert eo.shape == (2, ) + + # a few spotchecks from test_erfa.c + np.testing.assert_allclose(am[0]['pmt'], 
12.65133794027378508) + np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4, + 0.8115034002544663526e-4, + 0.3517555122593144633e-4]) + + ri, di = erfa.atciqz(2.71, 0.174, am[0]) + np.testing.assert_allclose(ri, 2.709994899247599271) + np.testing.assert_allclose(di, 0.1728740720983623469) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96944ef4f697139ef300e3d17dc323d388e0e388 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/_erfa/tests/test_erfa.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..439fcd081f15b38d362f0feb49ffbee45433842c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.py @@ -0,0 +1,10 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This package contains analytic functions useful for astronomy. + +In future versions of ``astropy``, many of these might be +accessible as `~astropy.modeling.core.Model`. + +""" + +# Shortcuts for most commonly used blackbody functions +from .blackbody import blackbody_nu, blackbody_lambda diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb8514460f2d773ab197166fe817cb3ad4c6f21 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfdb39f285ffc3e8684e1525962af248077eea1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.py @@ -0,0 +1,79 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Functions related to blackbody radiation.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +# LOCAL +from ..modeling import blackbody as _bb +from ..utils.decorators import deprecated + + +__all__ = ['blackbody_nu', 'blackbody_lambda'] + +# Units +FNU = _bb.FNU +FLAM = _bb.FLAM + + +@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_nu') +def blackbody_nu(in_x, temperature): + """Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`. + + .. note:: + + Use `numpy.errstate` to suppress Numpy warnings, if desired. + + .. warning:: + + Output values might contain ``nan`` and ``inf``. + + Parameters + ---------- + in_x : number, array-like, or `~astropy.units.Quantity` + Frequency, wavelength, or wave number. + If not a Quantity, it is assumed to be in Hz. + + temperature : number, array-like, or `~astropy.units.Quantity` + Blackbody temperature. + If not a Quantity, it is assumed to be in Kelvin. 
+ + Returns + ------- + flux : `~astropy.units.Quantity` + Blackbody monochromatic flux in + :math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`. + + Raises + ------ + ValueError + Invalid temperature. + + ZeroDivisionError + Wavelength is zero (when converting to frequency). + + """ + return _bb.blackbody_nu(in_x, temperature) + + +@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_lambda') +def blackbody_lambda(in_x, temperature): + """Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`. + + Parameters + ---------- + in_x : number, array-like, or `~astropy.units.Quantity` + Frequency, wavelength, or wave number. + If not a Quantity, it is assumed to be in Angstrom. + + temperature : number, array-like, or `~astropy.units.Quantity` + Blackbody temperature. + If not a Quantity, it is assumed to be in Kelvin. + + Returns + ------- + flux : `~astropy.units.Quantity` + Blackbody monochromatic flux in + :math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`. + + """ + return _bb.blackbody_lambda(in_x, temperature) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c70c62c186ba5a49d021229fd491d7c41f9707b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/blackbody.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e25aa47c4e59875893557b2e909b1c3eff93849f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb429c0557cf6194a215fdc1bf6e5f7bec2e79d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.py @@ -0,0 +1,22 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Tests for blackbody functions.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +# LOCAL +from ..blackbody import blackbody_nu, blackbody_lambda +from ... 
import units as u +from ...tests.helper import catch_warnings +from ...utils.exceptions import AstropyDeprecationWarning + +__doctest_skip__ = ['*'] + + +def test_deprecated_blackbodies(): + with catch_warnings(AstropyDeprecationWarning) as w: + blackbody_nu(5000 * u.AA, 6000 * u.K) + assert len(w) == 1 + + with catch_warnings(AstropyDeprecationWarning) as w: + blackbody_lambda(5000 * u.AA, 6000 * u.K) + assert len(w) == 1 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25c7789e4d5654f996a056538acd2297a4485f9e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/analytic_functions/tests/test_blackbody.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/astropy.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/astropy.cfg new file mode 100644 index 0000000000000000000000000000000000000000..af7932f0bf420cd3ed48cd84132718a8f4228774 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/astropy.cfg @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- + +### CONSOLE SETTINGS + +## Use Unicode characters when outputting values, and writing widgets to the +## console. +# unicode_output = False + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +## Maximum number of lines for the pretty-printer. If not provided, +## determine automatically from the size of the terminal. -1 means no +## limit. +# max_lines = + +## Maximum number of characters-per-line for the pretty-printer. If +## not provided, determine automatically from the size of the +## terminal, if possible. -1 means no limit. +# max_width = + + +### CORE DATA STRUCTURES AND TRANSFORMATIONS + +[nddata] + +## Whether to issue a warning if NDData arithmetic is performed with +## uncertainties and the uncertainties do not support the propagation of +## correlated uncertainties. +# warn_unsupported_correlated = True + +## Whether to issue a warning when the `~astropy.nddata.NDData` unit +## attribute is changed from a non-``None`` value to another value +## that data values/uncertainties are not scaled with the unit change. +# warn_setting_unit_directly = True + +[table] + +## The template that determines the name of a column if it cannot be +## determined. Uses new-style (format method) string formatting +# auto_colname = col{0} + +[table.jsviewer] + +## The URL to the jQuery library to use. If not provided, uses the +## internal copy installed with astropy. +# jquery_url = + +## The URL to the jQuery datatables library to use. If not provided, +## uses the internal copy installed with astropy. +# datatables_url = + +### ASTRONOMY COMPUTATIONS AND UTILITIES + +[vo] + +## URL where VO Service database file is stored. +# vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +## Conesearch database name. +# conesearch_dbname = conesearch_good + +[samp] + +## Whether to allow astropy.samp to use the internet, if available +# use_internet = True + +## How many times to retry communications when they fail +# n_retries = 10 + +[vo.validator] + +## Cone Search services master list for validation. 
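The deprecated analytic_functions.blackbody shims added above (and checked by the test just shown) simply forward to astropy.modeling.blackbody; a short usage sketch with arbitrary example values:

from astropy import units as u
from astropy.analytic_functions import blackbody_nu, blackbody_lambda

# Quantities carry their units; bare numbers are taken as Hz / Angstrom / K.
flux_nu = blackbody_nu(5000 * u.AA, 6000 * u.K)       # erg / (cm2 Hz s sr)
flux_lam = blackbody_lambda(5000 * u.AA, 6000 * u.K)  # erg / (cm2 Angstrom s sr)
print(flux_nu, flux_lam)

# Both calls emit AstropyDeprecationWarning, which is exactly what
# test_blackbody.py asserts; new code should use astropy.modeling.blackbody.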
+# conesearch_master_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +## Only check these Cone Search URLs. +# conesearch_urls = + +## VO Table warning codes that are considered non-critical +# noncritical_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 + + +### INPUT/OUTPUT + +[io.fits] + +## If True, enable support for record-valued keywords as described by FITS WCS +## Paper IV. Otherwise they are treated as normal keywords. +# enable_record_valued_keyword_cards = True + +## If True, extension names (i.e. the EXTNAME keyword) should be treated as +## case-sensitive. +# extension_name_case_sensitive = False + +## If True, automatically remove trailing whitespace for string values in +## headers. Otherwise the values are returned verbatim, with all whitespace +## intact. +# strip_header_whitespace = True + +## If True, use memory-mapped file access to read/write the data in FITS files. +## This generally provides better performance, especially for large files, but +## may affect performance in I/O-heavy applications. +# use_memmap = True + +[io.votable] + +## When True, treat fixable violations of the VOTable spec as exceptions. +# pedantic = True + + +### NUTS AND BOLTS OF ASTROPY + + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +# log_level = INFO + +## Whether to log warnings.warn calls +# log_warnings = True + +## Whether to log exceptions before raising them +# log_exceptions = False + +## Whether to always log messages to a log file +# log_to_file = False + +## The file to log messages to. When '', it defaults to a file 'astropy.log' in +## the astropy config directory. +# log_file_path = "" + +## Threshold for logging messages to log_file_path +# log_file_level = INFO + +## Format for log file entries +# log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[utils.data] + +## URL for astropy remote data site. +# dataurl = http://data.astropy.org/ + +## Time to wait for remote data query (in seconds). +# remote_timeout = 3.0 + +## Block size for computing MD5 file hashes. +# hash_block_size = 65536 + +## Number of bytes of remote data to download per step. +# download_block_size = 65536 + +## Number of times to try to get the lock while accessing the data cache before +## giving up. +# download_cache_lock_attempts = 5 + +## If True, temporary download files created when the cache is inacessible will +## be deleted at the end of the python session. +# delete_temporary_downloads_at_exit = True diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0643f7c5b9004cd4f67b38136d97b14974d8d24e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.py @@ -0,0 +1,13 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains configuration and setup utilities for the +Astropy project. This includes all functionality related to the +affiliated package index. 
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .paths import * +from .configuration import * +from .affiliated import * diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ed730dae41565d239af9656d3731ea2c5d8d86d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.py new file mode 100644 index 0000000000000000000000000000000000000000..305dfedd67d8e53e4844c4d952eae54d902b4c30 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.py @@ -0,0 +1,9 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This module contains functions and classes for finding information about +affiliated packages and installing them. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +__all__ = [] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5197dcc2865c1286b39a5d3ec4c6c0fcb48004d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/affiliated.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..dca52f9999ec3a91011ea534e293ebf0d6aa978f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.py @@ -0,0 +1,724 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This module contains classes and functions to standardize access to +configuration files for Astropy and affiliated packages. + +.. note:: + The configuration system makes use of the 'configobj' package, which stores + configuration in a text format like that used in the standard library + `ConfigParser`. More information and documentation for configobj can be + found at http://www.voidspace.org.uk/python/configobj.html. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six + +from contextlib import contextmanager +import hashlib +import io +from os import path +import re +from warnings import warn + +from ..extern.configobj import configobj, validate +from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning +from ..utils import find_current_module +from ..utils.introspection import resolve_name +from ..utils.misc import InheritDocstrings +from .paths import get_config_dir + + +__all__ = ['InvalidConfigurationItemWarning', + 'ConfigurationMissingWarning', 'get_config', + 'reload_config', 'ConfigNamespace', 'ConfigItem'] + + +class InvalidConfigurationItemWarning(AstropyWarning): + """ A Warning that is issued when the configuration value specified in the + astropy configuration file does not match the type expected for that + configuration value. 
+ """ + + +class ConfigurationMissingWarning(AstropyWarning): + """ A Warning that is issued when the configuration directory cannot be + accessed (usually due to a permissions problem). If this warning appears, + configuration items will be set to their defaults rather than read from the + configuration file, and no configuration will persist across sessions. + """ + + +# these are not in __all__ because it's not intended that a user ever see them +class ConfigurationDefaultMissingError(ValueError): + """ An exception that is raised when the configuration defaults (which + should be generated at build-time) are missing. + """ + + +# this is used in astropy/__init__.py +class ConfigurationDefaultMissingWarning(AstropyWarning): + """ A warning that is issued when the configuration defaults (which + should be generated at build-time) are missing. + """ + + +class ConfigurationChangedWarning(AstropyWarning): + """ + A warning that the configuration options have changed. + """ + + +class _ConfigNamespaceMeta(type): + def __init__(cls, name, bases, dict): + if cls.__bases__[0] is object: + return + + for key, val in six.iteritems(dict): + if isinstance(val, ConfigItem): + val.name = key + + +@six.add_metaclass(_ConfigNamespaceMeta) +class ConfigNamespace(object): + """ + A namespace of configuration items. Each subpackage with + configuration items should define a subclass of this class, + containing `ConfigItem` instances as members. + + For example:: + + class Conf(_config.ConfigNamespace): + unicode_output = _config.ConfigItem( + False, + 'Use Unicode characters when outputting values, ...') + use_color = _config.ConfigItem( + sys.platform != 'win32', + 'When True, use ANSI color escape sequences when ...', + aliases=['astropy.utils.console.USE_COLOR']) + conf = Conf() + """ + def set_temp(self, attr, value): + """ + Temporarily set a configuration value. + + Parameters + ---------- + attr : str + Configuration item name + + value : object + The value to set temporarily. + + Examples + -------- + >>> import astropy + >>> with astropy.conf.set_temp('use_color', False): + ... pass + ... # console output will not contain color + >>> # console output contains color again... + """ + if hasattr(self, attr): + return self.__class__.__dict__[attr].set_temp(value) + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + def reload(self, attr=None): + """ + Reload a configuration item from the configuration file. + + Parameters + ---------- + attr : str, optional + The name of the configuration parameter to reload. If not + provided, reload all configuration parameters. + """ + if attr is not None: + if hasattr(self, attr): + return self.__class__.__dict__[attr].reload() + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + for item in six.itervalues(self.__class__.__dict__): + if isinstance(item, ConfigItem): + item.reload() + + def reset(self, attr=None): + """ + Reset a configuration item to its default. + + Parameters + ---------- + attr : str, optional + The name of the configuration parameter to reload. If not + provided, reset all configuration parameters. 
+ """ + if attr is not None: + if hasattr(self, attr): + prop = self.__class__.__dict__[attr] + prop.set(prop.defaultvalue) + return + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + for item in six.itervalues(self.__class__.__dict__): + if isinstance(item, ConfigItem): + item.set(item.defaultvalue) + + +@six.add_metaclass(InheritDocstrings) +class ConfigItem(object): + """ + A setting and associated value stored in a configuration file. + + These objects should be created as members of + `ConfigNamespace` subclasses, for example:: + + class _Conf(config.ConfigNamespace): + unicode_output = config.ConfigItem( + False, + 'Use Unicode characters when outputting values, and writing widgets ' + 'to the console.') + conf = _Conf() + + Parameters + ---------- + defaultvalue : object, optional + The default value for this item. If this is a list of strings, this + item will be interpreted as an 'options' value - this item must be one + of those values, and the first in the list will be taken as the default + value. + + description : str or None, optional + A description of this item (will be shown as a comment in the + configuration file) + + cfgtype : str or None, optional + A type specifier like those used as the *values* of a particular key + in a ``configspec`` file of ``configobj``. If None, the type will be + inferred from the default value. + + module : str or None, optional + The full module name that this item is associated with. The first + element (e.g. 'astropy' if this is 'astropy.config.configuration') + will be used to determine the name of the configuration file, while + the remaining items determine the section. If None, the package will be + inferred from the package within whiich this object's initializer is + called. + + aliases : str, or list of str, optional + The deprecated location(s) of this configuration item. If the + config item is not found at the new location, it will be + searched for at all of the old locations. + + Raises + ------ + RuntimeError + If ``module`` is `None`, but the module this item is created from + cannot be determined. + """ + + # this is used to make validation faster so a Validator object doesn't + # have to be created every time + _validator = validate.Validator() + cfgtype = None + """ + A type specifier like those used as the *values* of a particular key in a + ``configspec`` file of ``configobj``. 
+ """ + + def __init__(self, defaultvalue='', description=None, cfgtype=None, + module=None, aliases=None): + from ..utils import isiterable + + if module is None: + module = find_current_module(2) + if module is None: + msg1 = 'Cannot automatically determine get_config module, ' + msg2 = 'because it is not called from inside a valid module' + raise RuntimeError(msg1 + msg2) + else: + module = module.__name__ + + self.module = module + self.description = description + self.__doc__ = description + + # now determine cfgtype if it is not given + if cfgtype is None: + if (isiterable(defaultvalue) and not + isinstance(defaultvalue, six.string_types)): + # it is an options list + dvstr = [six.text_type(v) for v in defaultvalue] + cfgtype = 'option(' + ', '.join(dvstr) + ')' + defaultvalue = dvstr[0] + elif isinstance(defaultvalue, bool): + cfgtype = 'boolean' + elif isinstance(defaultvalue, int): + cfgtype = 'integer' + elif isinstance(defaultvalue, float): + cfgtype = 'float' + elif isinstance(defaultvalue, six.string_types): + cfgtype = 'string' + defaultvalue = six.text_type(defaultvalue) + + self.cfgtype = cfgtype + + self._validate_val(defaultvalue) + self.defaultvalue = defaultvalue + + if aliases is None: + self.aliases = [] + elif isinstance(aliases, six.string_types): + self.aliases = [aliases] + else: + self.aliases = aliases + + def __set__(self, obj, value): + return self.set(value) + + def __get__(self, obj, objtype=None): + if obj is None: + return self + return self() + + def set(self, value): + """ + Sets the current value of this ``ConfigItem``. + + This also updates the comments that give the description and type + information. + + Parameters + ---------- + value + The value this item should be set to. + + Raises + ------ + TypeError + If the provided ``value`` is not valid for this ``ConfigItem``. + """ + try: + value = self._validate_val(value) + except validate.ValidateError as e: + msg = 'Provided value for configuration item {0} not valid: {1}' + raise TypeError(msg.format(self.name, e.args[0])) + + sec = get_config(self.module) + + sec[self.name] = value + + @contextmanager + def set_temp(self, value): + """ + Sets this item to a specified value only inside a with block. + + Use as:: + + ITEM = ConfigItem('ITEM', 'default', 'description') + + with ITEM.set_temp('newval'): + #... do something that wants ITEM's value to be 'newval' ... + print(ITEM) + + # ITEM is now 'default' after the with block + + Parameters + ---------- + value + The value to set this item to inside the with block. + + """ + initval = self() + self.set(value) + try: + yield + finally: + self.set(initval) + + def reload(self): + """ Reloads the value of this ``ConfigItem`` from the relevant + configuration file. + + Returns + ------- + val + The new value loaded from the configuration file. 
+ """ + self.set(self.defaultvalue) + baseobj = get_config(self.module, True) + secname = baseobj.name + + cobj = baseobj + # a ConfigObj's parent is itself, so we look for the parent with that + while cobj.parent is not cobj: + cobj = cobj.parent + + newobj = configobj.ConfigObj(cobj.filename, interpolation=False) + if secname is not None: + if secname not in newobj: + return baseobj.get(self.name) + newobj = newobj[secname] + + if self.name in newobj: + baseobj[self.name] = newobj[self.name] + return baseobj.get(self.name) + + def __repr__(self): + out = '<{0}: name={1!r} value={2!r} at 0x{3:x}>'.format( + self.__class__.__name__, self.name, self(), id(self)) + return out + + def __str__(self): + out = '\n'.join(('{0}: {1}', + ' cfgtype={2!r}', + ' defaultvalue={3!r}', + ' description={4!r}', + ' module={5}', + ' value={6!r}')) + out = out.format(self.__class__.__name__, self.name, self.cfgtype, + self.defaultvalue, self.description, self.module, + self()) + return out + + def __call__(self): + """ Returns the value of this ``ConfigItem`` + + Returns + ------- + val + This item's value, with a type determined by the ``cfgtype`` + attribute. + + Raises + ------ + TypeError + If the configuration value as stored is not this item's type. + """ + def section_name(section): + if section == '': + return 'at the top-level' + else: + return 'in section [{0}]'.format(section) + + options = [] + sec = get_config(self.module) + if self.name in sec: + options.append((sec[self.name], self.module, self.name)) + + for alias in self.aliases: + module, name = alias.rsplit('.', 1) + sec = get_config(module) + if '.' in module: + filename, module = module.split('.', 1) + else: + filename = module + module = '' + if name in sec: + if '.' in self.module: + new_module = self.module.split('.', 1)[1] + else: + new_module = '' + warn( + "Config parameter '{0}' {1} of the file '{2}' " + "is deprecated. Use '{3}' {4} instead.".format( + name, section_name(module), get_config_filename(filename), + self.name, section_name(new_module)), + AstropyDeprecationWarning) + options.append((sec[name], module, name)) + + if len(options) == 0: + self.set(self.defaultvalue) + options.append((self.defaultvalue, None, None)) + + if len(options) > 1: + filename, sec = self.module.split('.', 1) + warn( + "Config parameter '{0}' {1} of the file '{2}' is " + "given by more than one alias ({3}). Using the first.".format( + self.name, section_name(sec), get_config_filename(filename), + ', '.join([ + '.'.join(x[1:3]) for x in options if x[1] is not None])), + AstropyDeprecationWarning) + + val = options[0][0] + + try: + return self._validate_val(val) + except validate.ValidateError as e: + raise TypeError('Configuration value not valid:' + e.args[0]) + + def _validate_val(self, val): + """ Validates the provided value based on cfgtype and returns the + type-cast value + + throws the underlying configobj exception if it fails + """ + # note that this will normally use the *class* attribute `_validator`, + # but if some arcane reason is needed for making a special one for an + # instance or sub-class, it will be used + return self._validator.check(self.cfgtype, val) + + +# this dictionary stores the master copy of the ConfigObj's for each +# root package +_cfgobjs = {} + + +def get_config_filename(packageormod=None): + """ + Get the filename of the config file associated with the given + package or module. 
+ """ + cfg = get_config(packageormod) + while cfg.parent is not cfg: + cfg = cfg.parent + return cfg.filename + + +# This is used by testing to override the config file, so we can test +# with various config files that exercise different features of the +# config system. +_override_config_file = None + + +def get_config(packageormod=None, reload=False): + """ Gets the configuration object or section associated with a particular + package or module. + + Parameters + ----------- + packageormod : str or None + The package for which to retrieve the configuration object. If a + string, it must be a valid package name, or if `None`, the package from + which this function is called will be used. + + reload : bool, optional + Reload the file, even if we have it cached. + + Returns + ------- + cfgobj : ``configobj.ConfigObj`` or ``configobj.Section`` + If the requested package is a base package, this will be the + ``configobj.ConfigObj`` for that package, or if it is a subpackage or + module, it will return the relevant ``configobj.Section`` object. + + Raises + ------ + RuntimeError + If ``packageormod`` is `None`, but the package this item is created + from cannot be determined. + """ + if packageormod is None: + packageormod = find_current_module(2) + if packageormod is None: + msg1 = 'Cannot automatically determine get_config module, ' + msg2 = 'because it is not called from inside a valid module' + raise RuntimeError(msg1 + msg2) + else: + packageormod = packageormod.__name__ + + packageormodspl = packageormod.split('.') + rootname = packageormodspl[0] + secname = '.'.join(packageormodspl[1:]) + + cobj = _cfgobjs.get(rootname, None) + + if cobj is None or reload: + if _ASTROPY_SETUP_: + # There's no reason to use anything but the default config + cobj = configobj.ConfigObj(interpolation=False) + else: + cfgfn = None + try: + # This feature is intended only for use by the unit tests + if _override_config_file is not None: + cfgfn = _override_config_file + else: + cfgfn = path.join(get_config_dir(), rootname + '.cfg') + cobj = configobj.ConfigObj(cfgfn, interpolation=False) + except (IOError, OSError) as e: + msg = ('Configuration defaults will be used due to ') + errstr = '' if len(e.args) < 1 else (':' + str(e.args[0])) + msg += e.__class__.__name__ + errstr + msg += ' on {0}'.format(cfgfn) + warn(ConfigurationMissingWarning(msg)) + + # This caches the object, so if the file becomes accessible, this + # function won't see it unless the module is reloaded + cobj = configobj.ConfigObj(interpolation=False) + + _cfgobjs[rootname] = cobj + + if secname: # not the root package + if secname not in cobj: + cobj[secname] = {} + return cobj[secname] + else: + return cobj + + +def reload_config(packageormod=None): + """ Reloads configuration settings from a configuration file for the root + package of the requested package/module. + + This overwrites any changes that may have been made in `ConfigItem` + objects. This applies for any items that are based on this file, which + is determined by the *root* package of ``packageormod`` + (e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'`` + module). + + Parameters + ---------- + packageormod : str or None + The package or module name - see `get_config` for details. 
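get_config, defined in full above, is the low-level accessor that the ConfigItem machinery relies on; a quick sketch of calling it directly (using 'astropy.io.fits' purely as an example of a subpackage path):

from astropy.config import get_config

sec = get_config('astropy.io.fits')     # the [io.fits] section of astropy.cfg
print(dict(sec))                        # whatever the user has set there, often {}
print(get_config('astropy').filename)   # path of the astropy.cfg actually in use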
+ """ + sec = get_config(packageormod, True) + # look for the section that is its own parent - that's the base object + while sec.parent is not sec: + sec = sec.parent + sec.reload() + + +def is_unedited_config_file(content, template_content=None): + """ + Determines if a config file can be safely replaced because it doesn't + actually contain any meaningful content. + + To meet this criteria, the config file must be either: + + - All comments or completely empty + + - An exact match to a "legacy" version of the config file prior to + Astropy 0.4, when APE3 was implemented and the config file + contained commented-out values by default. + """ + # We want to calculate the md5sum using universal line endings, so + # that even if the files had their line endings converted to \r\n + # on Windows, this will still work. + + content = content.encode('latin-1') + + # The jquery_url setting, present in 0.3.2 and later only, is + # effectively auto-generated by the build system, so we need to + # ignore it in the md5sum calculation for 0.3.2. + content = re.sub(br'\njquery_url\s*=\s*[^\n]+', b'', content) + + # First determine if the config file has any effective content + buffer = io.BytesIO(content) + buffer.seek(0) + raw_cfg = configobj.ConfigObj(buffer, interpolation=True) + for v in six.itervalues(raw_cfg): + if len(v): + break + else: + return True + + # Now determine if it matches the md5sum of a known, unedited + # config file. + known_configs = set([ + '7d4b4f1120304b286d71f205975b1286', # v0.3.2 + '5df7e409425e5bfe7ed041513fda3288', # v0.3 + '8355f99a01b3bdfd8761ef45d5d8b7e5', # v0.2 + '4ea5a84de146dc3fcea2a5b93735e634' # v0.2.1, v0.2.2, v0.2.3, v0.2.4, v0.2.5 + ]) + + md5 = hashlib.md5() + md5.update(content) + digest = md5.hexdigest() + return digest in known_configs + + +# this is not in __all__ because it's not intended that a user uses it +def update_default_config(pkg, default_cfg_dir_or_fn, version=None): + """ + Checks if the configuration file for the specified package exists, + and if not, copy over the default configuration. If the + configuration file looks like it has already been edited, we do + not write over it, but instead write a file alongside it named + ``pkg.version.cfg`` as a "template" for the user. + + Parameters + ---------- + pkg : str + The package to be updated. + default_cfg_dir_or_fn : str + The filename or directory name where the default configuration file is. + If a directory name, ``'pkg.cfg'`` will be used in that directory. + version : str, optional + The current version of the given package. If not provided, it will + be obtained from ``pkg.__version__``. + + Returns + ------- + updated : bool + If the profile was updated, `True`, otherwise `False`. + + Raises + ------ + AttributeError + If the version number of the package could not determined. + + """ + + if path.isdir(default_cfg_dir_or_fn): + default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg') + else: + default_cfgfn = default_cfg_dir_or_fn + + if not path.isfile(default_cfgfn): + # There is no template configuration file, which basically + # means the affiliated package is not using the configuration + # system, so just return. 
+ return False + + cfgfn = get_config(pkg).filename + + with io.open(default_cfgfn, 'rt', encoding='latin-1') as fr: + template_content = fr.read() + + doupdate = False + if cfgfn is not None: + if path.exists(cfgfn): + with io.open(cfgfn, 'rt', encoding='latin-1') as fd: + content = fd.read() + + identical = (content == template_content) + + if not identical: + doupdate = is_unedited_config_file( + content, template_content) + elif path.exists(path.dirname(cfgfn)): + doupdate = True + identical = False + + if version is None: + version = resolve_name(pkg, '__version__') + + # Don't install template files for dev versions, or we'll end up + # spamming `~/.astropy/config`. + if 'dev' not in version and cfgfn is not None: + template_path = path.join( + get_config_dir(), '{0}.{1}.cfg'.format(pkg, version)) + needs_template = not path.exists(template_path) + else: + needs_template = False + + if doupdate or needs_template: + if needs_template: + with io.open(template_path, 'wt', encoding='latin-1') as fw: + fw.write(template_content) + # If we just installed a new template file and we can't + # update the main configuration file because it has user + # changes, display a warning. + if not identical and not doupdate: + warn( + "The configuration options in {0} {1} may have changed, " + "your configuration file was not updated in order to " + "preserve local changes. A new configuration template " + "has been saved to '{2}'.".format( + pkg, version, template_path), + ConfigurationChangedWarning) + + if doupdate and not identical: + with io.open(cfgfn, 'wt', encoding='latin-1') as fw: + fw.write(template_content) + return True + + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bfa80232e884fa1b6b35e4b5637a00577c0a2e4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/configuration.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0162150ac97cc418353b1ce77e5bf1a1d14a42 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.py @@ -0,0 +1,315 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" This module contains functions to determine where configuration and +data/cache files used by Astropy should be placed. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern import six +from ..utils.decorators import wraps + +import os +import shutil +import sys + + +__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config', + 'set_temp_cache'] + + +def _find_home(): + """ Locates and return the home directory (or best approximation) on this + system. + + Raises + ------ + OSError + If the home directory cannot be located - usually means you are running + Astropy on some obscure platform that doesn't have standard home + directories. 
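configuration.py, now complete above, is the machinery behind objects such as astropy.conf. Below is a sketch of how a subpackage declares an item and overrides it temporarily; the Conf class and fancy_output item are hypothetical, and in astropy itself this code lives inside the subpackage's own module so that the config section is inferred from that module's name:

from astropy.config import ConfigNamespace, ConfigItem

class Conf(ConfigNamespace):
    # Hypothetical item; the cfgtype ('boolean') is inferred from the default.
    fancy_output = ConfigItem(False, 'Enable fancy output.')

conf = Conf()

print(conf.fancy_output)                  # default value, or whatever the .cfg file says
with conf.set_temp('fancy_output', True):
    print(conf.fancy_output)              # True only inside the with block
conf.reload('fancy_output')               # re-read the value from the config file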
+ """ + + # this is used below to make fix up encoding issues that sometimes crop up + # in py2.x but not in py3.x + if six.PY2: + decodepath = lambda pth: pth.decode(sys.getfilesystemencoding()) + else: + decodepath = lambda pth: pth + + # First find the home directory - this is inspired by the scheme ipython + # uses to identify "home" + if os.name == 'posix': + # Linux, Unix, AIX, OS X + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find unix home directory to search for ' + 'astropy config dir') + elif os.name == 'nt': # This is for all modern Windows (NT or after) + if 'MSYSTEM' in os.environ and os.environ.get('HOME'): + # Likely using an msys shell; use whatever it is using for its + # $HOME directory + homedir = decodepath(os.environ['HOME']) + # Next try for a network home + elif 'HOMESHARE' in os.environ: + homedir = decodepath(os.environ['HOMESHARE']) + # See if there's a local home + elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ: + homedir = os.path.join(os.environ['HOMEDRIVE'], + os.environ['HOMEPATH']) + homedir = decodepath(homedir) + # Maybe a user profile? + elif 'USERPROFILE' in os.environ: + homedir = decodepath(os.path.join(os.environ['USERPROFILE'])) + else: + try: + from ..extern.six.moves import winreg as wreg + shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders' + key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders) + + homedir = wreg.QueryValueEx(key, 'Personal')[0] + homedir = decodepath(homedir) + key.Close() + except Exception: + # As a final possible resort, see if HOME is present + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find windows home directory to ' + 'search for astropy config dir') + else: + # for other platforms, try HOME, although it probably isn't there + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find a home directory to search for ' + 'astropy config dir - are you on an unspported ' + 'platform?') + return homedir + + +def get_config_dir(create=True): + """ + Determines the Astropy configuration directory name and creates the + directory if it doesn't exist. + + This directory is typically ``$HOME/.astropy/config``, but if the + XDG_CONFIG_HOME environment variable is set and the + ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. + If neither exists, the former will be created and symlinked to the latter. + + Returns + ------- + configdir : str + The absolute path to the configuration directory. + + """ + + # symlink will be set to this if the directory is created + linkto = None + + # If using set_temp_config, that overrides all + if set_temp_config._temp_path is not None: + xch = set_temp_config._temp_path + config_path = os.path.join(xch, 'astropy') + if not os.path.exists(config_path): + os.mkdir(config_path) + return os.path.abspath(config_path) + + # first look for XDG_CONFIG_HOME + xch = os.environ.get('XDG_CONFIG_HOME') + + if xch is not None and os.path.exists(xch): + xchpth = os.path.join(xch, 'astropy') + if not os.path.islink(xchpth): + if os.path.exists(xchpth): + return os.path.abspath(xchpth) + else: + linkto = xchpth + return os.path.abspath(_find_or_create_astropy_dir('config', linkto)) + + +def get_cache_dir(): + """ + Determines the Astropy cache directory name and creates the directory if it + doesn't exist. 
+ + This directory is typically ``$HOME/.astropy/cache``, but if the + XDG_CACHE_HOME environment variable is set and the + ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. + If neither exists, the former will be created and symlinked to the latter. + + Returns + ------- + cachedir : str + The absolute path to the cache directory. + + """ + + # symlink will be set to this if the directory is created + linkto = None + + # If using set_temp_cache, that overrides all + if set_temp_cache._temp_path is not None: + xch = set_temp_cache._temp_path + cache_path = os.path.join(xch, 'astropy') + if not os.path.exists(cache_path): + os.mkdir(cache_path) + return os.path.abspath(cache_path) + + # first look for XDG_CACHE_HOME + xch = os.environ.get('XDG_CACHE_HOME') + + if xch is not None and os.path.exists(xch): + xchpth = os.path.join(xch, 'astropy') + if not os.path.islink(xchpth): + if os.path.exists(xchpth): + return os.path.abspath(xchpth) + else: + linkto = xchpth + + return os.path.abspath(_find_or_create_astropy_dir('cache', linkto)) + + +class _SetTempPath(object): + _temp_path = None + _default_path_getter = None + + def __init__(self, path=None, delete=False): + if path is not None: + path = os.path.abspath(path) + + self._path = path + self._delete = delete + self._prev_path = self.__class__._temp_path + + def __enter__(self): + self.__class__._temp_path = self._path + return self._default_path_getter() + + def __exit__(self, *args): + self.__class__._temp_path = self._prev_path + + if self._delete and self._path is not None: + shutil.rmtree(self._path) + + def __call__(self, func): + """Implements use as a decorator.""" + + @wraps(func) + def wrapper(*args, **kwargs): + with self: + func(*args, **kwargs) + + return wrapper + + +class set_temp_config(_SetTempPath): + """ + Context manager to set a temporary path for the Astropy config, primarily + for use with testing. + + If the path set by this context manager does not already exist it will be + created, if possible. + + This may also be used as a decorator on a function to set the config path + just within that function. + + Parameters + ---------- + + path : str, optional + The directory (which must exist) in which to find the Astropy config + files, or create them if they do not already exist. If None, this + restores the config path to the user's default config path as returned + by `get_config_dir` as though this context manager were not in effect + (this is useful for testing). In this case the ``delete`` argument is + always ignored. + + delete : bool, optional + If True, cleans up the temporary directory after exiting the temp + context (default: False). + """ + + _default_path_getter = staticmethod(get_config_dir) + + def __enter__(self): + # Special case for the config case, where we need to reset all the + # cached config objects + from .configuration import _cfgobjs + + path = super(set_temp_config, self).__enter__() + _cfgobjs.clear() + return path + + def __exit__(self, *args): + from .configuration import _cfgobjs + + super(set_temp_config, self).__exit__(*args) + _cfgobjs.clear() + + +class set_temp_cache(_SetTempPath): + """ + Context manager to set a temporary path for the Astropy download cache, + primarily for use with testing (though there may be other applications + for setting a different cache directory, for example to switch to a cache + dedicated to large files). + + If the path set by this context manager does not already exist it will be + created, if possible. 
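The path helpers defined above (get_config_dir, get_cache_dir, set_temp_config) locate the per-user ~/.astropy directories; a brief sketch of querying them and redirecting configuration to a scratch directory, much as the test helpers do:

import tempfile
from astropy.config import get_config_dir, get_cache_dir, set_temp_config

print(get_config_dir())   # usually ~/.astropy/config (or an existing $XDG_CONFIG_HOME/astropy)
print(get_cache_dir())    # usually ~/.astropy/cache  (or an existing $XDG_CACHE_HOME/astropy)

# Redirect config to a throwaway directory; delete=True removes it again on exit.
with set_temp_config(tempfile.mkdtemp(), delete=True) as cfg_dir:
    print(cfg_dir)        # <tmpdir>/astropy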
+ + This may also be used as a decorator on a function to set the cache path + just within that function. + + Parameters + ---------- + + path : str + The directory (which must exist) in which to find the Astropy cache + files, or create them if they do not already exist. If None, this + restores the cache path to the user's default cache path as returned + by `get_cache_dir` as though this context manager were not in effect + (this is useful for testing). In this case the ``delete`` argument is + always ignored. + + delete : bool, optional + If True, cleans up the temporary directory after exiting the temp + context (default: False). + """ + + _default_path_getter = staticmethod(get_cache_dir) + + +def _find_or_create_astropy_dir(dirnm, linkto): + innerdir = os.path.join(_find_home(), '.astropy') + maindir = os.path.join(_find_home(), '.astropy', dirnm) + + if not os.path.exists(maindir): + # first create .astropy dir if needed + if not os.path.exists(innerdir): + try: + os.mkdir(innerdir) + except OSError: + if not os.path.isdir(innerdir): + raise + elif not os.path.isdir(innerdir): + msg = 'Intended Astropy directory {0} is actually a file.' + raise IOError(msg.format(innerdir)) + + try: + os.mkdir(maindir) + except OSError: + if not os.path.isdir(maindir): + raise + + if (not sys.platform.startswith('win') and + linkto is not None and + not os.path.exists(linkto)): + os.symlink(maindir, linkto) + + elif not os.path.isdir(maindir): + msg = 'Intended Astropy {0} directory {1} is actually a file.' + raise IOError(msg.format(dirnm, maindir)) + + return os.path.abspath(maindir) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d19a048db02e49f77df78797e578b8156fcc485d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/paths.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..e4a1c0f21cc8b8b2ea399508e279c13df73ec1e8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.py @@ -0,0 +1,11 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def get_package_data(): + return { + str('astropy.config.tests'): ['data/*.cfg'] + } + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39d2c7282a2433b44213bf07354d2056c13e036e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..800d82e7ee00f69a89739dd3a1c3c6f5e29be442 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff 
--git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7c477062a7c4aced83a36cb5c00d779fe14189f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/alias.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/alias.cfg new file mode 100644 index 0000000000000000000000000000000000000000..612cdd9813d02a41a7b5b92534337bd84175603e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/alias.cfg @@ -0,0 +1,2 @@ +[coordinates.name_resolve] +name_resolve_timeout = 42.0 \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.cfg new file mode 100644 index 0000000000000000000000000000000000000000..cafa0e4218b41653f944716fae3f88b8599f263c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.cfg @@ -0,0 +1,149 @@ + +# Use Unicode characters when outputting values, and writing widgets to the +# console. +unicode_output = False +[utils.console] + +# When True, use ANSI color escape sequences when writing to the console. +use_color = True + +[logger] + +# Threshold for the logging messages. Logging messages that are less severe +# than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +# 'ERROR' +log_level = INFO + +# Whether to use color for the level names +use_color = True + +# Whether to log warnings.warn calls +log_warnings = True + +# Whether to log exceptions before raising them +log_exceptions = False + +# Whether to always log messages to a log file +log_to_file = False + +# The file to log messages to. When '', it defaults to a file 'astropy.log' in +# the astropy config directory. +log_file_path = "" + +# Threshold for logging messages to log_file_path +log_file_level = INFO + +# Format for log file entries +log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[coordinates.name_resolve] + +# The URL to Sesame's web-queryable database. +sesame_url = http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/, http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/ + +# This specifies the default database that SESAME will query when using the +# name resolve mechanism in the coordinates subpackage. Default is to search +# all databases, but this can be 'all', 'simbad', 'ned', or 'vizier'. +# Options: all, simbad, ned, vizier +sesame_database = all + +# This is the maximum time to wait for a response from a name resolve query to +# SESAME in seconds. +name_resolve_timeout = 5 + +[table.pprint] + +# Maximum number of lines for the pretty-printer to use if it cannot determine +# the terminal size. Negative numbers mean no limit. +max_lines = 25 + +# Maximum number of characters for the pretty-printer to use per line if it +# cannot determine the terminal size. Negative numbers mean no limit. +max_width = 80 + +[table.table] + +# The template that determines the name of a column if it cannot be +# determined. 
Uses new-style (format method) string formatting +auto_colname = col{0} + +[utils.data] + +# URL for astropy remote data site. +dataurl = http://data.astropy.org/ + +# Time to wait for remote data query (in seconds). +remote_timeout = 3.0 + +# Block size for computing MD5 file hashes. +hash_block_size = 65536 + +# Number of bytes of remote data to download per step. +download_block_size = 65536 + +# Number of times to try to get the lock while accessing the data cache before +# giving up. +download_cache_lock_attempts = 5 + +# If True, temporary download files created when the cache is inacessible will +# be deleted at the end of the python session. +delete_temporary_downloads_at_exit = True + +[io.fits] + +# If True, enable support for record-valued keywords as described by FITS WCS +# Paper IV. Otherwise they are treated as normal keywords. +enabled_record_valued_keyword_cards = True + +# If True, extension names (i.e. the EXTNAME keyword) should be treated as +# case-sensitive. +extension_name_case_sensitive = False + +# If True, automatically remove trailing whitespace for string values in +# headers. Otherwise the values are returned verbatim, with all whitespace +# intact. +strip_header_whitespace = True + +# If True, use memory-mapped file access to read/write the data in FITS files. +# This generally provides better performance, especially for large files, but +# may affect performance in I/O-heavy applications. +use_memmap = True + +[io.votable.table] + +# When True, treat fixable violations of the VOTable spec as exceptions. +pedantic = False + +[cosmology.core] + +# The default cosmology to use. Note this is only read on import, so changing +# this value at runtime has no effect. +default_cosmology = no_default + +[nddata.nddata] + +# Whether to issue a warning if NDData arithmetic is performed with +# uncertainties and the uncertainties do not support the propagation of +# correlated uncertainties. +warn_unsupported_correlated = True + +[vo.client.vos_catalog] + +# URL where VO Service database file is stored. +vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +[vo.client.conesearch] + +# Conesearch database name. +conesearch_dbname = conesearch_good + +[vo.validator.validate] + +# Cone Search services master list for validation. +cs_mstr_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +# Only check these Cone Search URLs. 
+cs_urls = http://archive.noao.edu/nvo/usno.php?cat=a&, http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&, http://irsa.ipac.caltech.edu/cgi-bin/Oasis/CatSearch/nph-catsearch?CAT=fp_psc&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/220/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/243/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/252/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/254/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/255/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/284/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=II/246/out&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=field&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=photoobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=phototag&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specphotoall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=sppparams&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=psc&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=xsc&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnoa2&tab=main&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnob1&tab=main&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Galaxy&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObj&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Star&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=SpecObjAll&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_psc&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_xsc&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-A2&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1& + +# VO Table warning codes that are considered non-critical +noncrit_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.windows.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.windows.cfg new file mode 100644 index 0000000000000000000000000000000000000000..589703fc09f9e62fdc4b5b50715af5e8e5ae9bb1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/astropy.0.3.windows.cfg @@ -0,0 +1,149 @@ + +# Use Unicode characters when outputting values, and writing widgets to the +# console. +unicode_output = False +[utils.console] + +# When True, use ANSI color escape sequences when writing to the console. +use_color = True + +[logger] + +# Threshold for the logging messages. Logging messages that are less severe +# than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +# 'ERROR' +log_level = INFO + +# Whether to use color for the level names +use_color = True + +# Whether to log warnings.warn calls +log_warnings = True + +# Whether to log exceptions before raising them +log_exceptions = False + +# Whether to always log messages to a log file +log_to_file = False + +# The file to log messages to. 
When '', it defaults to a file 'astropy.log' in +# the astropy config directory. +log_file_path = "" + +# Threshold for logging messages to log_file_path +log_file_level = INFO + +# Format for log file entries +log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[coordinates.name_resolve] + +# The URL to Sesame's web-queryable database. +sesame_url = http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/, http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/ + +# This specifies the default database that SESAME will query when using the +# name resolve mechanism in the coordinates subpackage. Default is to search +# all databases, but this can be 'all', 'simbad', 'ned', or 'vizier'. +# Options: all, simbad, ned, vizier +sesame_database = all + +# This is the maximum time to wait for a response from a name resolve query to +# SESAME in seconds. +name_resolve_timeout = 5 + +[table.pprint] + +# Maximum number of lines for the pretty-printer to use if it cannot determine +# the terminal size. Negative numbers mean no limit. +max_lines = 25 + +# Maximum number of characters for the pretty-printer to use per line if it +# cannot determine the terminal size. Negative numbers mean no limit. +max_width = 80 + +[table.table] + +# The template that determines the name of a column if it cannot be +# determined. Uses new-style (format method) string formatting +auto_colname = col{0} + +[utils.data] + +# URL for astropy remote data site. +dataurl = http://data.astropy.org/ + +# Time to wait for remote data query (in seconds). +remote_timeout = 3.0 + +# Block size for computing MD5 file hashes. +hash_block_size = 65536 + +# Number of bytes of remote data to download per step. +download_block_size = 65536 + +# Number of times to try to get the lock while accessing the data cache before +# giving up. +download_cache_lock_attempts = 5 + +# If True, temporary download files created when the cache is inacessible will +# be deleted at the end of the python session. +delete_temporary_downloads_at_exit = True + +[io.fits] + +# If True, enable support for record-valued keywords as described by FITS WCS +# Paper IV. Otherwise they are treated as normal keywords. +enabled_record_valued_keyword_cards = True + +# If True, extension names (i.e. the EXTNAME keyword) should be treated as +# case-sensitive. +extension_name_case_sensitive = False + +# If True, automatically remove trailing whitespace for string values in +# headers. Otherwise the values are returned verbatim, with all whitespace +# intact. +strip_header_whitespace = True + +# If True, use memory-mapped file access to read/write the data in FITS files. +# This generally provides better performance, especially for large files, but +# may affect performance in I/O-heavy applications. +use_memmap = True + +[io.votable.table] + +# When True, treat fixable violations of the VOTable spec as exceptions. +pedantic = False + +[cosmology.core] + +# The default cosmology to use. Note this is only read on import, so changing +# this value at runtime has no effect. +default_cosmology = no_default + +[nddata.nddata] + +# Whether to issue a warning if NDData arithmetic is performed with +# uncertainties and the uncertainties do not support the propagation of +# correlated uncertainties. +warn_unsupported_correlated = True + +[vo.client.vos_catalog] + +# URL where VO Service database file is stored. +vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +[vo.client.conesearch] + +# Conesearch database name. 
+conesearch_dbname = conesearch_good + +[vo.validator.validate] + +# Cone Search services master list for validation. +cs_mstr_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +# Only check these Cone Search URLs. +cs_urls = http://archive.noao.edu/nvo/usno.php?cat=a&, http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&, http://irsa.ipac.caltech.edu/cgi-bin/Oasis/CatSearch/nph-catsearch?CAT=fp_psc&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/220/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/243/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/252/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/254/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/255/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/284/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=II/246/out&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=field&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=photoobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=phototag&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specphotoall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=sppparams&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=psc&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=xsc&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnoa2&tab=main&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnob1&tab=main&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Galaxy&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObj&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Star&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=SpecObjAll&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_psc&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_xsc&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-A2&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1& + +# VO Table warning codes that are considered non-critical +noncrit_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/deprecated.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/deprecated.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a6cb084a3e9644cd35d15e783f2a84e570127498 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/deprecated.cfg @@ -0,0 +1,2 @@ +[table.pprint] +max_lines = 25 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/empty.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/empty.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a069dfd547ee4d71b92b952cd80021ff9c16f5e5 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/empty.cfg @@ -0,0 +1,15 @@ +## Use Unicode characters when outputting values, and writing 
widgets to the +## console. +#unicode_output = False + +[utils.console] + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +# log_level = INFO diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/not_empty.cfg b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/not_empty.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c7a660f870174bcd243b89a245e2ccc9fd4097a8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/data/not_empty.cfg @@ -0,0 +1,15 @@ +## Use Unicode characters when outputting values, and writing widgets to the +## console. +#unicode_output = False + +[utils.console] + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +log_level = INFO diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..25e51cfccafc5dc463530851066aa961c2bf1c2d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import io +import os +import sys +import subprocess + +import pytest + +from ...tests.helper import catch_warnings +from ...extern import six + +from ...utils.data import get_pkg_data_filename +from .. import configuration +from .. 
import paths +from ...utils.exceptions import AstropyDeprecationWarning + + +def test_paths(): + assert 'astropy' in paths.get_config_dir() + assert 'astropy' in paths.get_cache_dir() + + +def test_set_temp_config(tmpdir, monkeypatch): + monkeypatch.setattr(paths.set_temp_config, '_temp_path', None) + + orig_config_dir = paths.get_config_dir() + temp_config_dir = str(tmpdir.mkdir('config')) + temp_astropy_config = os.path.join(temp_config_dir, 'astropy') + + # Test decorator mode + @paths.set_temp_config(temp_config_dir) + def test_func(): + assert paths.get_config_dir() == temp_astropy_config + + # Test temporary restoration of original default + with paths.set_temp_config() as d: + assert d == orig_config_dir == paths.get_config_dir() + + test_func() + + # Test context manager mode (with cleanup) + with paths.set_temp_config(temp_config_dir, delete=True): + assert paths.get_config_dir() == temp_astropy_config + + assert not os.path.exists(temp_config_dir) + + +def test_set_temp_cache(tmpdir, monkeypatch): + monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None) + + orig_cache_dir = paths.get_cache_dir() + temp_cache_dir = str(tmpdir.mkdir('cache')) + temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy') + + # Test decorator mode + @paths.set_temp_cache(temp_cache_dir) + def test_func(): + assert paths.get_cache_dir() == temp_astropy_cache + + # Test temporary restoration of original default + with paths.set_temp_cache() as d: + assert d == orig_cache_dir == paths.get_cache_dir() + + test_func() + + # Test context manager mode (with cleanup) + with paths.set_temp_cache(temp_cache_dir, delete=True): + assert paths.get_cache_dir() == temp_astropy_cache + + assert not os.path.exists(temp_cache_dir) + + +def test_config_file(): + from ..configuration import get_config, reload_config + + apycfg = get_config('astropy') + assert apycfg.filename.endswith('astropy.cfg') + + cfgsec = get_config('astropy.config') + assert cfgsec.depth == 1 + assert cfgsec.name == 'config' + assert cfgsec.parent.filename.endswith('astropy.cfg') + + reload_config('astropy') + + +def test_configitem(): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + ci = ConfigItem(34, 'this is a Description') + + class Conf(ConfigNamespace): + tstnm = ci + + conf = Conf() + + assert ci.module == 'astropy.config.tests.test_configs' + assert ci() == 34 + assert ci.description == 'this is a Description' + + assert conf.tstnm == 34 + + sec = get_config(ci.module) + assert sec['tstnm'] == 34 + + ci.description = 'updated Descr' + ci.set(32) + assert ci() == 32 + + # It's useful to go back to the default to allow other test functions to + # call this one and still be in the default configuration. 
+ ci.description = 'this is a Description' + ci.set(34) + assert ci() == 34 + + +def test_configitem_types(): + + from ..configuration import ConfigNamespace, ConfigItem + + cio = ConfigItem(['op1', 'op2', 'op3']) + + class Conf(ConfigNamespace): + tstnm1 = ConfigItem(34) + tstnm2 = ConfigItem(34.3) + tstnm3 = ConfigItem(True) + tstnm4 = ConfigItem('astring') + + conf = Conf() + + assert isinstance(conf.tstnm1, int) + assert isinstance(conf.tstnm2, float) + assert isinstance(conf.tstnm3, bool) + assert isinstance(conf.tstnm4, six.text_type) + + with pytest.raises(TypeError): + conf.tstnm1 = 34.3 + conf.tstnm2 = 12 # this would should succeed as up-casting + with pytest.raises(TypeError): + conf.tstnm3 = 'fasd' + with pytest.raises(TypeError): + conf.tstnm4 = 546.245 + + +def test_configitem_options(tmpdir): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + cio = ConfigItem(['op1', 'op2', 'op3']) + + class Conf(ConfigNamespace): + tstnmo = cio + + conf = Conf() + + sec = get_config(cio.module) + + assert isinstance(cio(), six.text_type) + assert cio() == 'op1' + assert sec['tstnmo'] == 'op1' + + cio.set('op2') + with pytest.raises(TypeError): + cio.set('op5') + assert sec['tstnmo'] == 'op2' + + # now try saving + apycfg = sec + while apycfg.parent is not apycfg: + apycfg = apycfg.parent + f = tmpdir.join('astropy.cfg') + with io.open(f.strpath, 'wb') as fd: + apycfg.write(fd) + with io.open(f.strpath, 'r', encoding='utf-8') as fd: + lns = [x.strip() for x in f.readlines()] + + assert 'tstnmo = op2' in lns + + +def test_config_noastropy_fallback(monkeypatch): + """ + Tests to make sure configuration items fall back to their defaults when + there's a problem accessing the astropy directory + """ + + # make sure the config directory is not searched + monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo') + monkeypatch.delenv(str('XDG_CONFIG_HOME')) + monkeypatch.setattr(paths.set_temp_config, '_temp_path', None) + + # make sure the _find_or_create_astropy_dir function fails as though the + # astropy dir could not be accessed + def osraiser(dirnm, linkto): + raise OSError + monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser) + + # also have to make sure the stored configuration objects are cleared + monkeypatch.setattr(configuration, '_cfgobjs', {}) + + with pytest.raises(OSError): + # make sure the config dir search fails + paths.get_config_dir() + + # now run the basic tests, and make sure the warning about no astropy + # is present + with catch_warnings(configuration.ConfigurationMissingWarning) as w: + test_configitem() + assert len(w) == 1 + w = w[0] + assert 'Configuration defaults will be used' in str(w.message) + + +def test_configitem_setters(): + + from ..configuration import ConfigNamespace, ConfigItem + + class Conf(ConfigNamespace): + tstnm12 = ConfigItem(42, 'this is another Description') + + conf = Conf() + + assert conf.tstnm12 == 42 + with conf.set_temp('tstnm12', 45): + assert conf.tstnm12 == 45 + assert conf.tstnm12 == 42 + + conf.tstnm12 = 43 + assert conf.tstnm12 == 43 + + with conf.set_temp('tstnm12', 46): + assert conf.tstnm12 == 46 + + # Make sure it is reset even with Exception + try: + with conf.set_temp('tstnm12', 47): + raise Exception + except Exception: + pass + + assert conf.tstnm12 == 43 + + +def test_empty_config_file(): + from ..configuration import is_unedited_config_file + + def get_content(fn): + with io.open(get_pkg_data_filename(fn), 'rt', encoding='latin-1') as fd: + return fd.read() + + content = 
get_content('data/empty.cfg') + assert is_unedited_config_file(content) + + content = get_content('data/not_empty.cfg') + assert not is_unedited_config_file(content) + + content = get_content('data/astropy.0.3.cfg') + assert is_unedited_config_file(content) + + content = get_content('data/astropy.0.3.windows.cfg') + assert is_unedited_config_file(content) + + +class TestAliasRead(object): + + def setup_class(self): + configuration._override_config_file = get_pkg_data_filename('data/alias.cfg') + + def test_alias_read(self): + from astropy.utils.data import conf + + with catch_warnings() as w: + conf.reload() + assert conf.remote_timeout == 42 + + assert len(w) == 1 + assert str(w[0].message).startswith( + "Config parameter 'name_resolve_timeout' in section " + "[coordinates.name_resolve]") + + def teardown_class(self): + from astropy.utils.data import conf + + configuration._override_config_file = None + conf.reload() + + +def test_configitem_unicode(tmpdir): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + cio = ConfigItem('ასტრონომიის') + + class Conf(ConfigNamespace): + tstunicode = cio + + conf = Conf() + + sec = get_config(cio.module) + + assert isinstance(cio(), six.text_type) + assert cio() == 'ასტრონომიის' + assert sec['tstunicode'] == 'ასტრონომიის' + + +def test_warning_move_to_top_level(): + # Check that the warning about deprecated config items in the + # file works. See #2514 + from ... import conf + + configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg') + + try: + with catch_warnings(AstropyDeprecationWarning) as w: + conf.reload() + conf.max_lines + assert len(w) == 1 + finally: + configuration._override_config_file = None + conf.reload() + + +def test_no_home(): + # "import astropy" fails when neither $HOME nor $XDG_CONFIG_HOME + # is set. To test, we unset those environment variables for a + # subprocess and try to import astropy.
+ + test_path = os.path.dirname(__file__) + astropy_path = os.path.abspath( + os.path.join(test_path, '..', '..', '..')) + + env = os.environ.copy() + paths = [astropy_path] + if env.get('PYTHONPATH'): + paths.append(env.get('PYTHONPATH')) + env[str('PYTHONPATH')] = str(os.pathsep.join(paths)) + + for val in ['HOME', 'XDG_CONFIG_HOME']: + if val in env: + del env[val] + + retcode = subprocess.check_call( + [sys.executable, '-c', 'import astropy'], + env=env) + + assert retcode == 0 + + +def test_unedited_template(): + # Test that the config file is written at most once + config_dir = os.path.join(os.path.dirname(__file__), '..', '..') + configuration.update_default_config('astropy', config_dir) + assert configuration.update_default_config('astropy', config_dir) is False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f8b35a657aef9e856d7aa5ba58a5ee026348b0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/config/tests/test_configs.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..8694a58c7e46fbab2527ce8dc9de17301ef2e97e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.py @@ -0,0 +1,44 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This file contains pytest configuration settings that are astropy-specific +(i.e. those that would not necessarily be shared by affiliated packages +making use of astropy's test runner). 
+""" +from .extern.six.moves import builtins + +from .tests.pytest_plugins import * + +try: + import matplotlib +except ImportError: + HAS_MATPLOTLIB = False +else: + HAS_MATPLOTLIB = True + +enable_deprecations_as_exceptions(include_astropy_deprecations=False) + + +if HAS_MATPLOTLIB: + matplotlib.use('Agg') + + +matplotlibrc_cache = {} + + +def pytest_configure(config): + builtins._pytest_running = True + # do not assign to matplotlibrc_cache in function scope + if HAS_MATPLOTLIB: + matplotlibrc_cache.update(matplotlib.rcParams) + matplotlib.rcdefaults() + + +def pytest_unconfigure(config): + builtins._pytest_running = False + # do not assign to matplotlibrc_cache in function scope + if HAS_MATPLOTLIB: + matplotlib.rcParams.update(matplotlibrc_cache) + matplotlibrc_cache.clear() + + +PYTEST_HEADER_MODULES['Cython'] = 'cython' diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.pyc new file mode 100644 index 0000000000000000000000000000000000000000..414d1555f6001fe9e003f3fa193fbb12fb23b4fa Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/conftest.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..24ee254807250e2e8b9b71dbd15571285bde81d8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.py @@ -0,0 +1,56 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Contains astronomical and physical constants for use in Astropy or other +places. + +A typical use case might be:: + + >>> from astropy.constants import c, m_e + >>> # ... define the mass of something you want the rest energy of as m ... + >>> m = m_e + >>> E = m * c**2 + >>> E.to('MeV') # doctest: +FLOAT_CMP + + +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +# Hack to make circular imports with units work +try: + from .. import units + del units +except ImportError: + pass + +from .constant import Constant, EMConstant +from . import si +from . import cgs +from . 
import codata2014, iau2015 + +# for updating the constants module docstring +_lines = [ + 'The following constants are available:\n', + '========== ============== ================ =========================', + ' Name Value Unit Description', + '========== ============== ================ =========================', +] + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if isinstance(_c, Constant) and _c.abbrev not in locals(): + locals()[_c.abbrev] = _c.__class__(_c.abbrev, _c.name, _c.value, + _c._unit_string, _c.uncertainty, + _c.reference) + + _lines.append('{0:^10} {1:^14.9g} {2:^16} {3}'.format( + _c.abbrev, _c.value, _c._unit_string, _c.name)) + +_lines.append(_lines[1]) + +if __doc__ is not None: + __doc__ += '\n'.join(_lines) + +del _lines, _nm, _c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65b83488fbee2ae1e44252e48286159587e31eac Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.py new file mode 100644 index 0000000000000000000000000000000000000000..c140c0c181dec4f8b3fe91ed536d0960611feb36 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.py @@ -0,0 +1,20 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants for Astropy v1.3 and earlier. +See :mod:`astropy.constants` for a complete listing of constants +defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . import codata2010, iau2012 + +for _nm, _c in itertools.chain(sorted(vars(codata2010).items()), + sorted(vars(iau2012).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals()): + locals()[_c.abbrev] = _c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d635b01b9ff830c7db02e9d08b9e3ff3d171739b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst13.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.py new file mode 100644 index 0000000000000000000000000000000000000000..89d1dd3fa4feb71870a255689945c39a48d86670 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.py @@ -0,0 +1,19 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants for Astropy v2.0. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . 
import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals()): + locals()[_c.abbrev] = _c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52c4e9cddb4bafe376971baa0bc7f49e9ec68f20 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/astropyconst20.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.py new file mode 100644 index 0000000000000000000000000000000000000000..ab681e33635d7d8abf2120ff470692c888e51537 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.py @@ -0,0 +1,18 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in cgs units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals() + and _c.system in ['esu', 'gauss', 'emu']): + locals()[_c.abbrev] = _c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76e08a2451fa32414f473a131c4036550d22abdb Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/cgs.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.py new file mode 100644 index 0000000000000000000000000000000000000000..353aefe89293ff69869e089154b42aff09036792 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.py @@ -0,0 +1,112 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. 
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant, EMConstant + + +# PHYSICAL CONSTANTS + +class CODATA2010(Constant): + default_reference = 'CODATA 2010' + _registry = {} + _has_incompatible_units = set() + + def __new__(cls, abbrev, name, value, unit, uncertainty, + reference=default_reference, system=None): + return(super(CODATA2010, cls).__new__(cls, abbrev, name, value, unit, + uncertainty, reference, system)) + + +class EMCODATA2010(CODATA2010, EMConstant): + _registry = CODATA2010._registry + + +h = CODATA2010('h', "Planck constant", 6.62606957e-34, 'J s', + 0.00000029e-34, system='si') + +hbar = CODATA2010('hbar', "Reduced Planck constant", + h.value * 0.5 / np.pi, 'J s', + h.uncertainty * 0.5 / np.pi, + h.reference, system='si') + +k_B = CODATA2010('k_B', "Boltzmann constant", 1.3806488e-23, 'J / (K)', + 0.0000013e-23, system='si') + +c = CODATA2010('c', "Speed of light in vacuum", 2.99792458e8, 'm / (s)', 0., + system='si') + +G = CODATA2010('G', "Gravitational constant", 6.67384e-11, 'm3 / (kg s2)', + 0.00080e-11, system='si') + +g0 = CODATA2010('g0', "Standard acceleration of gravity", 9.80665, 'm / s2', 0.0, + system='si') + +m_p = CODATA2010('m_p', "Proton mass", 1.672621777e-27, 'kg', 0.000000074e-27, + system='si') + +m_n = CODATA2010('m_n', "Neutron mass", 1.674927351e-27, 'kg', 0.000000074e-27, + system='si') + +m_e = CODATA2010('m_e', "Electron mass", 9.10938291e-31, 'kg', 0.00000040e-31, + system='si') + +u = CODATA2010('u', "Atomic mass", 1.660538921e-27, 'kg', 0.000000073e-27, + system='si') + +sigma_sb = CODATA2010('sigma_sb', "Stefan-Boltzmann constant", 5.670373e-8, + 'W / (K4 m2)', 0.000021e-8, system='si') + +e = EMCODATA2010('e', 'Electron charge', 1.602176565e-19, 'C', 0.000000035e-19, + system='si') + +eps0 = EMCODATA2010('eps0', 'Electric constant', 8.854187817e-12, 'F/m', 0.0, + system='si') + +N_A = CODATA2010('N_A', "Avogadro's number", 6.02214129e23, '1 / (mol)', + 0.00000027e23, system='si') + +R = CODATA2010('R', "Gas constant", 8.3144621, 'J / (K mol)', 0.0000075, + system='si') + +Ryd = CODATA2010('Ryd', 'Rydberg constant', 10973731.568539, '1 / (m)', + 0.000055, system='si') + +a0 = CODATA2010('a0', "Bohr radius", 0.52917721092e-10, 'm', 0.00000000017e-10, + system='si') + +muB = CODATA2010('muB', "Bohr magneton", 927.400968e-26, 'J/T', 0.00002e-26, + system='si') + +alpha = CODATA2010('alpha', "Fine-structure constant", 7.2973525698e-3, + '', 0.0000000024e-3, system='si') + +atm = CODATA2010('atm', "Standard atmosphere", 101325, 'Pa', 0.0, + system='si') + +mu0 = CODATA2010('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0, + system='si') + +sigma_T = CODATA2010('sigma_T', "Thomson scattering cross-section", + 0.6652458734e-28, 'm2', 0.0000000013e-28, system='si') + +b_wien = Constant('b_wien', 'Wien wavelength displacement law constant', + 2.8977721e-3, 'm K', 0.0000026e-3, 'CODATA 2010', system='si') + +# cgs constants +# Only constants that cannot be converted directly from S.I. are defined here. 
+ +e_esu = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0, + 'statC', e.uncertainty * c.value * 10.0, system='esu') + +e_emu = EMCODATA2010(e.abbrev, e.name, e.value / 10, 'abC', + e.uncertainty / 10, system='emu') + +e_gauss = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0, + 'Fr', e.uncertainty * c.value * 10.0, system='gauss') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6512a628eee20805b2629729cb8169713b8c657b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2010.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.py new file mode 100644 index 0000000000000000000000000000000000000000..19b33f33f3fa59d83e1b097d28c410bd662b0f13 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.py @@ -0,0 +1,107 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant, EMConstant + + +# PHYSICAL CONSTANTS + +class CODATA2014(Constant): + default_reference = 'CODATA 2014' + _registry = {} + _has_incompatible_units = set() + + +class EMCODATA2014(CODATA2014, EMConstant): + _registry = CODATA2014._registry + + +h = CODATA2014('h', "Planck constant", 6.626070040e-34, + 'J s', 0.000000081e-34, system='si') + +hbar = CODATA2014('hbar', "Reduced Planck constant", 1.054571800e-34, + 'J s', 0.000000013e-34, system='si') + +k_B = CODATA2014('k_B', "Boltzmann constant", 1.38064852e-23, + 'J / (K)', 0.00000079e-23, system='si') + +c = CODATA2014('c', "Speed of light in vacuum", 299792458., + 'm / (s)', 0.0, system='si') + + +G = CODATA2014('G', "Gravitational constant", 6.67408e-11, + 'm3 / (kg s2)', 0.00031e-11, system='si') + +g0 = CODATA2014('g0', "Standard acceleration of gravity", 9.80665, + 'm / s2', 0.0, system='si') + +m_p = CODATA2014('m_p', "Proton mass", 1.672621898e-27, + 'kg', 0.000000021e-27, system='si') + +m_n = CODATA2014('m_n', "Neutron mass", 1.674927471e-27, + 'kg', 0.000000021e-27, system='si') + +m_e = CODATA2014('m_e', "Electron mass", 9.10938356e-31, + 'kg', 0.00000011e-31, system='si') + +u = CODATA2014('u', "Atomic mass", 1.660539040e-27, + 'kg', 0.000000020e-27, system='si') + +sigma_sb = CODATA2014('sigma_sb', "Stefan-Boltzmann constant", 5.670367e-8, + 'W / (K4 m2)', 0.000013e-8, system='si') + +e = EMCODATA2014('e', 'Electron charge', 1.6021766208e-19, + 'C', 0.0000000098e-19, system='si') + +eps0 = EMCODATA2014('eps0', 'Electric constant', 8.854187817e-12, + 'F/m', 0.0, system='si') + +N_A = CODATA2014('N_A', "Avogadro's number", 6.022140857e23, + '1 / (mol)', 0.000000074e23, system='si') + +R = CODATA2014('R', "Gas constant", 8.3144598, + 'J / (K mol)', 0.0000048, system='si') + +Ryd = CODATA2014('Ryd', 'Rydberg constant', 10973731.568508, + '1 / (m)', 0.000065, system='si') + +a0 = CODATA2014('a0', "Bohr radius", 0.52917721067e-10, + 'm', 0.00000000012e-10, system='si') + +muB = CODATA2014('muB', "Bohr magneton", 
927.4009994e-26, + 'J/T', 0.00002e-26, system='si') + +alpha = CODATA2014('alpha', "Fine-structure constant", 7.2973525664e-3, + '', 0.0000000017e-3, system='si') + +atm = CODATA2014('atm', "Standard atmosphere", 101325, + 'Pa', 0.0, system='si') + +mu0 = CODATA2014('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0, + system='si') + +sigma_T = CODATA2014('sigma_T', "Thomson scattering cross-section", + 0.66524587158e-28, 'm2', 0.00000000091e-28, + system='si') + +b_wien = CODATA2014('b_wien', 'Wien wavelength displacement law constant', + 2.8977729e-3, 'm K', 00.0000017e-3, system='si') + +# cgs constants +# Only constants that cannot be converted directly from S.I. are defined here. + +e_esu = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0, + 'statC', e.uncertainty * c.value * 10.0, system='esu') + +e_emu = EMCODATA2014(e.abbrev, e.name, e.value / 10, 'abC', + e.uncertainty / 10, system='emu') + +e_gauss = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0, + 'Fr', e.uncertainty * c.value * 10.0, system='gauss') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.pyc new file mode 100644 index 0000000000000000000000000000000000000000..534773ac81d0ac759941cfb068f42c63e8d24a14 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/codata2014.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0c05dece71fcb8b437d69a2d86f7d18fed2739 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.py @@ -0,0 +1,237 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six + +import functools +import types +import warnings +import numpy as np + +from ..units.core import Unit, UnitsError +from ..units.quantity import Quantity +from ..utils import lazyproperty +from ..utils.exceptions import AstropyUserWarning +from ..utils.misc import InheritDocstrings + +__all__ = ['Constant', 'EMConstant'] + + +class ConstantMeta(InheritDocstrings): + """Metaclass for the :class:`Constant`. The primary purpose of this is to + wrap the double-underscore methods of :class:`Quantity` which is the + superclass of :class:`Constant`. + + In particular this wraps the operator overloads such as `__add__` to + prevent their use with constants such as ``e`` from being used in + expressions without specifying a system. The wrapper checks to see if the + constant is listed (by name) in ``Constant._has_incompatible_units``, a set + of those constants that are defined in different systems of units are + physically incompatible. It also performs this check on each `Constant` if + it hasn't already been performed (the check is deferred until the + `Constant` is actually used in an expression to speed up import times, + among other reasons). 
+ """ + + def __new__(mcls, name, bases, d): + def wrap(meth): + @functools.wraps(meth) + def wrapper(self, *args, **kwargs): + name_lower = self.name.lower() + instances = self._registry[name_lower] + if not self._checked_units: + for inst in six.itervalues(instances): + try: + self.unit.to(inst.unit) + except UnitsError: + self._has_incompatible_units.add(name_lower) + self._checked_units = True + + if (not self.system and + name_lower in self._has_incompatible_units): + systems = sorted([x for x in instances if x]) + raise TypeError( + 'Constant {0!r} does not have physically compatible ' + 'units across all systems of units and cannot be ' + 'combined with other values without specifying a ' + 'system (eg. {1}.{2})'.format(self.abbrev, self.abbrev, + systems[0])) + + return meth(self, *args, **kwargs) + + return wrapper + + # The wrapper applies to so many of the __ methods that it's easier to + # just exclude the ones it doesn't apply to + exclude = set(['__new__', '__array_finalize__', '__array_wrap__', + '__dir__', '__getattr__', '__init__', '__str__', + '__repr__', '__hash__', '__iter__', '__getitem__', + '__len__', '__nonzero__', '__quantity_subclass__']) + for attr, value in six.iteritems(vars(Quantity)): + if (isinstance(value, types.FunctionType) and + attr.startswith('__') and attr.endswith('__') and + attr not in exclude): + d[attr] = wrap(value) + + return super(ConstantMeta, mcls).__new__(mcls, name, bases, d) + + +@six.add_metaclass(ConstantMeta) +class Constant(Quantity): + """A physical or astronomical constant. + + These objects are quantities that are meant to represent physical + constants. + """ + _registry = {} + _has_incompatible_units = set() + + def __new__(cls, abbrev, name, value, unit, uncertainty, + reference=None, system=None): + if reference is None: + reference = getattr(cls, 'default_reference', None) + if reference is None: + raise TypeError("{} requires a reference.".format(cls)) + name_lower = name.lower() + instances = cls._registry.setdefault(name_lower, {}) + # By-pass Quantity initialization, since units may not yet be + # initialized here, and we store the unit in string form. + inst = np.array(value).view(cls) + + if system in instances: + warnings.warn('Constant {0!r} already has a definition in the ' + '{1!r} system from {2!r} reference'.format( + name, system, reference), AstropyUserWarning) + for c in six.itervalues(instances): + if system is not None and not hasattr(c.__class__, system): + setattr(c, system, inst) + if c.system is not None and not hasattr(inst.__class__, c.system): + setattr(inst, c.system, c) + + instances[system] = inst + + inst._abbrev = abbrev + inst._name = name + inst._value = value + inst._unit_string = unit + inst._uncertainty = uncertainty + inst._reference = reference + inst._system = system + + inst._checked_units = False + return inst + + def __repr__(self): + return ('<{0} name={1!r} value={2} uncertainty={3} unit={4!r} ' + 'reference={5!r}>'.format(self.__class__, self.name, self.value, + self.uncertainty, str(self.unit), + self.reference)) + + def __str__(self): + return (' Name = {0}\n' + ' Value = {1}\n' + ' Uncertainty = {2}\n' + ' Unit = {3}\n' + ' Reference = {4}'.format(self.name, self.value, + self.uncertainty, self.unit, + self.reference)) + + def __quantity_subclass__(self, unit): + return super(Constant, self).__quantity_subclass__(unit)[0], False + + def copy(self): + """ + Return a copy of this `Constant` instance. 
Since they are by + definition immutable, this merely returns another reference to + ``self``. + """ + return self + __deepcopy__ = __copy__ = copy + + @property + def abbrev(self): + """A typical ASCII text abbreviation of the constant, also generally + the same as the Python variable used for this constant. + """ + + return self._abbrev + + @property + def name(self): + """The full name of the constant.""" + + return self._name + + @lazyproperty + def _unit(self): + """The unit(s) in which this constant is defined.""" + + return Unit(self._unit_string) + + @property + def uncertainty(self): + """The known uncertainty in this constant's value.""" + + return self._uncertainty + + @property + def reference(self): + """The source used for the value of this constant.""" + + return self._reference + + @property + def system(self): + """The system of units in which this constant is defined (typically + `None` so long as the constant's units can be directly converted + between systems). + """ + + return self._system + + def _instance_or_super(self, key): + instances = self._registry[self.name.lower()] + inst = instances.get(key) + if inst is not None: + return inst + else: + return getattr(super(Constant, self), key) + + @property + def si(self): + """If the Constant is defined in the SI system return that instance of + the constant, else convert to a Quantity in the appropriate SI units. + """ + + return self._instance_or_super('si') + + @property + def cgs(self): + """If the Constant is defined in the CGS system return that instance of + the constant, else convert to a Quantity in the appropriate CGS units. + """ + + return self._instance_or_super('cgs') + + def __array_finalize__(self, obj): + for attr in ('_abbrev', '_name', '_value', '_unit_string', + '_uncertainty', '_reference', '_system'): + setattr(self, attr, getattr(obj, attr, None)) + + self._checked_units = getattr(obj, '_checked_units', False) + + +class EMConstant(Constant): + """An electromagnetic constant.""" + + @property + def cgs(self): + """Overridden for EMConstant to raise a `TypeError` + emphasizing that there are multiple EM extensions to CGS. + """ + + raise TypeError("Cannot convert EM constants to cgs because there " + "are different systems for E.M constants within the " + "c.g.s system (ESU, Gaussian, etc.). Instead, " + "directly use the constant with the appropriate " + "suffix (e.g. e.esu, e.gauss, etc.).") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.pyc new file mode 100644 index 0000000000000000000000000000000000000000..537891a900bd8428a876d2fc1ff0b356d43e402e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/constant.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.py new file mode 100644 index 0000000000000000000000000000000000000000..4c8ed6777ce9b86380f28974999274e192bc6978 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.py @@ -0,0 +1,78 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. 
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant + +# ASTRONOMICAL CONSTANTS + + +class IAU2012(Constant): + default_reference = 'IAU 2012' + _registry = {} + _has_incompatible_units = set() + + +# DISTANCE + +# Astronomical Unit +au = IAU2012('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0, + "IAU 2012 Resolution B2", system='si') + +# Parsec + +pc = IAU2012('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm', + au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Kiloparsec +kpc = IAU2012('kpc', "Kiloparsec", + 1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm', + 1000. * au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Luminosity +L_bol0 = IAU2012('L_bol0', "Luminosity for absolute bolometric magnitude 0", + 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si') + + +# SOLAR QUANTITIES + +# Solar luminosity +L_sun = IAU2012('L_sun', "Solar luminosity", 3.846e26, 'W', 0.0005e26, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Solar mass +M_sun = IAU2012('M_sun', "Solar mass", 1.9891e30, 'kg', 0.00005e30, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Solar radius +R_sun = IAU2012('R_sun', "Solar radius", 6.95508e8, 'm', 0.00026e8, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + + +# OTHER SOLAR SYSTEM QUANTITIES + +# Jupiter mass +M_jup = IAU2012('M_jup', "Jupiter mass", 1.8987e27, 'kg', 0.00005e27, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Jupiter equatorial radius +R_jup = IAU2012('R_jup', "Jupiter equatorial radius", 7.1492e7, 'm', + 0.00005e7, "Allen's Astrophysical Quantities 4th Ed.", + system='si') + +# Earth mass +M_earth = IAU2012('M_earth', "Earth mass", 5.9742e24, 'kg', 0.00005e24, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Earth equatorial radius +R_earth = IAU2012('R_earth', "Earth equatorial radius", 6.378136e6, 'm', + 0.0000005e6, "Allen's Astrophysical Quantities 4th Ed.", + system='si') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf2433820bf35c4faef4ba8c1840677fa230a44f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2012.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae11cad53926ce2131f0053651be1af026fe13f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.py @@ -0,0 +1,96 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. 
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant +from .codata2014 import G + +# ASTRONOMICAL CONSTANTS + + +class IAU2015(Constant): + default_reference = 'IAU 2015' + _registry = {} + _has_incompatible_units = set() + + +# DISTANCE + +# Astronomical Unit +au = IAU2015('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0, + "IAU 2012 Resolution B2", system='si') + +# Parsec + +pc = IAU2015('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm', + au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Kiloparsec +kpc = IAU2015('kpc', "Kiloparsec", + 1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm', + 1000. * au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Luminosity +L_bol0 = IAU2015('L_bol0', "Luminosity for absolute bolometric magnitude 0", + 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si') + + +# SOLAR QUANTITIES + +# Solar luminosity +L_sun = IAU2015('L_sun', "Nominal solar luminosity", 3.828e26, + 'W', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Solar mass parameter +GM_sun = IAU2015('GM_sun', 'Nominal solar mass parameter', 1.3271244e20, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Solar mass (derived from mass parameter and gravitational constant) +M_sun = IAU2015('M_sun', "Solar mass", GM_sun.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_sun.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Solar radius +R_sun = IAU2015('R_sun', "Nominal solar radius", 6.957e8, 'm', 0.0, + "IAU 2015 Resolution B 3", system='si') + + +# OTHER SOLAR SYSTEM QUANTITIES + +# Jupiter mass parameter +GM_jup = IAU2015('GM_jup', 'Nominal Jupiter mass parameter', 1.2668653e17, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Jupiter mass (derived from mass parameter and gravitational constant) +M_jup = IAU2015('M_jup', "Jupiter mass", GM_jup.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_jup.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Jupiter equatorial radius +R_jup = IAU2015('R_jup', "Nominal Jupiter equatorial radius", 7.1492e7, + 'm', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Earth mass parameter +GM_earth = IAU2015('GM_earth', 'Nominal Earth mass parameter', 3.986004e14, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Earth mass (derived from mass parameter and gravitational constant) +M_earth = IAU2015('M_earth', "Earth mass", + GM_earth.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_earth.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Earth equatorial radius +R_earth = IAU2015('R_earth', "Nominal Earth equatorial radius", 6.3781e6, + 'm', 0.0, "IAU 2015 Resolution B 3", system='si') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8401a775b66af129ae866e6e80ef89f4851962f7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/iau2015.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.py new file mode 100644 index 
0000000000000000000000000000000000000000..3cd9f7c3d928c2b9a57845c6438b77d8ca63de27 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7f3ade13411b6057e26f6e3b82cd5a3da34375a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.py new file mode 100644 index 0000000000000000000000000000000000000000..ed529bd0f1741ceea37b3a2dc02670d3533375f9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.py @@ -0,0 +1,20 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals() + and _c.system == 'si'): + locals()[_c.abbrev] = _c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.pyc new file mode 100644 index 0000000000000000000000000000000000000000..348a7bcfe1bf2d3c067b8b2e426c825c03b436a9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/si.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..800d82e7ee00f69a89739dd3a1c3c6f5e29be442 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb15b4b991780ebeea07e75c452cde705c1251bd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.py new file mode 100644 index 0000000000000000000000000000000000000000..4ed874e86dbf7babf174f08a162fc83384d6ecbb --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.py @@ -0,0 +1,165 @@ +# 
Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import copy + +import pytest + +from .. import Constant +from ...units import Quantity as Q + + +def test_c(): + + from .. import c + + # c is an exactly defined constant, so it shouldn't be changing + assert c.value == 2.99792458e8 # default is S.I. + assert c.si.value == 2.99792458e8 + assert c.cgs.value == 2.99792458e10 + + # make sure it has the necessary attributes and they're not blank + assert c.uncertainty == 0 # c is a *defined* quantity + assert c.name + assert c.reference + assert c.unit + + +def test_h(): + + from .. import h + + # check that the value is fairly close to what it should be (not exactly + # checking because this might get updated in the future) + assert abs(h.value - 6.626e-34) < 1e-38 + assert abs(h.si.value - 6.626e-34) < 1e-38 + assert abs(h.cgs.value - 6.626e-27) < 1e-31 + + # make sure it has the necessary attributes and they're not blank + assert h.uncertainty + assert h.name + assert h.reference + assert h.unit + + +def test_e(): + """Tests for #572 demonstrating how EM constants should behave.""" + + from .. import e + + # A test quantity + E = Q(100, 'V/m') + + # Without specifying a system e should not combine with other quantities + pytest.raises(TypeError, lambda: e * E) + # Try it again (as regression test on a minor issue mentioned in #745 where + # repeated attempts to use e in an expression resulted in UnboundLocalError + # instead of TypeError) + pytest.raises(TypeError, lambda: e * E) + + # e.cgs is too ambiguous and should not work at all + pytest.raises(TypeError, lambda: e.cgs * E) + + assert isinstance(e.si, Q) + assert isinstance(e.gauss, Q) + assert isinstance(e.esu, Q) + + assert e.si * E == Q(100, 'eV/m') + assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m') + assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m') + + +def test_g0(): + """Tests for #1263 demonstrating how g0 constant should behave.""" + from .. import g0 + + # g0 is an exactly defined constant, so it shouldn't be changing + assert g0.value == 9.80665 # default is S.I. + assert g0.si.value == 9.80665 + assert g0.cgs.value == 9.80665e2 + + # make sure it has the necessary attributes and they're not blank + assert g0.uncertainty == 0 # g0 is a *defined* quantity + assert g0.name + assert g0.reference + assert g0.unit + + # Check that its unit have the correct physical type + assert g0.unit.physical_type == 'acceleration' + + +def test_b_wien(): + """b_wien should give the correct peak wavelength for + given blackbody temperature. The Sun is used in this test. + + """ + from .. import b_wien + from ... import units as u + t = 5778 * u.K + w = (b_wien / t).to(u.nm) + assert round(w.value) == 502 + + +def test_unit(): + + from ... import units as u + + from ... import constants as const + + for key, val in six.iteritems(vars(const)): + if isinstance(val, Constant): + # Getting the unit forces the unit parser to run. Confirm + # that none of the constants defined in astropy have + # invalid unit. + assert not isinstance(val.unit, u.UnrecognizedUnit) + + +def test_copy(): + from ... import constants as const + cc = copy.deepcopy(const.c) + assert cc == const.c + + cc = copy.copy(const.c) + assert cc == const.c + + +def test_view(): + """Check that Constant and Quantity views can be taken (#3537, #3538).""" + from .. 
import c + c2 = c.view(Constant) + assert c2 == c + assert c2.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c2.uncertainty == 0 # c is a *defined* quantity + assert c2.name == c.name + assert c2.reference == c.reference + assert c2.unit == c.unit + + q1 = c.view(Q) + assert q1 == c + assert q1.value == c.value + assert type(q1) is Q + assert not hasattr(q1, 'reference') + + q2 = Q(c) + assert q2 == c + assert q2.value == c.value + assert type(q2) is Q + assert not hasattr(q2, 'reference') + + c3 = Q(c, subok=True) + assert c3 == c + assert c3.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c3.uncertainty == 0 # c is a *defined* quantity + assert c3.name == c.name + assert c3.reference == c.reference + assert c3.unit == c.unit + + c4 = Q(c, subok=True, copy=False) + assert c4 is c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15e4b7ae4b7ebe22e9a96dd0387e56e620ae877f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_constant.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..8087aa05f677f489952916136f8f281b60e60232 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.py @@ -0,0 +1,22 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function, unicode_literals + +import pytest + +from ... 
import constants as const +from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa +from ...extern.six.moves import zip + +originals = [const.Constant('h_fake', 'Not Planck', + 0.0, 'J s', 0.0, 'fakeref', + system='si'), + const.h, + const.e] +xfails = [True, True, True] + + +@pytest.mark.parametrize(("original", "xfail"), zip(originals, xfails)) +def test_new_constant(pickle_protocol, original, xfail): + if xfail: + pytest.xfail() + check_pickling_recovery(original, pickle_protocol) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43f1ba7b483423bf7ef0602b804fdd601ad621ec Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_pickle.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.py new file mode 100644 index 0000000000000000000000000000000000000000..2296ed705eca928cf41fc99a7ac49f44f3c8fb75 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.py @@ -0,0 +1,161 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import copy + +import pytest + +from .. import Constant +from ...units import Quantity as Q + + +def test_c(): + + from ..codata2010 import c + + # c is an exactly defined constant, so it shouldn't be changing + assert c.value == 2.99792458e8 # default is S.I. + assert c.si.value == 2.99792458e8 + assert c.cgs.value == 2.99792458e10 + + # make sure it has the necessary attributes and they're not blank + assert c.uncertainty == 0 # c is a *defined* quantity + assert c.name + assert c.reference + assert c.unit + + +def test_h(): + + from ..codata2010 import h + from .. import h as h_current + + # check that the value is the CODATA2010 value + assert abs(h.value - 6.62606957e-34) < 1e-43 + assert abs(h.si.value - 6.62606957e-34) < 1e-43 + assert abs(h.cgs.value - 6.62606957e-27) < 1e-36 + + # Check it is different than the current value + assert abs(h.value - h_current.value) > 4e-42 + + # make sure it has the necessary attributes and they're not blank + assert h.uncertainty + assert h.name + assert h.reference + assert h.unit + + +def test_e(): + + from ..astropyconst13 import e + + # A test quantity + E = Q(100.00000348276221, 'V/m') + + # e.cgs is too ambiguous and should not work at all + with pytest.raises(TypeError): + e.cgs * E + + assert isinstance(e.si, Q) + assert isinstance(e.gauss, Q) + assert isinstance(e.esu, Q) + + assert e.si * E == Q(100, 'eV/m') + assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m') + assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m') + + +def test_g0(): + """Tests for #1263 demonstrating how g0 constant should behave.""" + from ..astropyconst13 import g0 + + # g0 is an exactly defined constant, so it shouldn't be changing + assert g0.value == 9.80665 # default is S.I. 
+ assert g0.si.value == 9.80665 + assert g0.cgs.value == 9.80665e2 + + # make sure it has the necessary attributes and they're not blank + assert g0.uncertainty == 0 # g0 is a *defined* quantity + assert g0.name + assert g0.reference + assert g0.unit + + # Check that its unit have the correct physical type + assert g0.unit.physical_type == 'acceleration' + + +def test_b_wien(): + """b_wien should give the correct peak wavelength for + given blackbody temperature. The Sun is used in this test. + + """ + from ..astropyconst13 import b_wien + from ... import units as u + t = 5778 * u.K + w = (b_wien / t).to(u.nm) + assert round(w.value) == 502 + + +def test_unit(): + + from ... import units as u + + from .. import astropyconst13 as const + + for key, val in six.iteritems(vars(const)): + if isinstance(val, Constant): + # Getting the unit forces the unit parser to run. Confirm + # that none of the constants defined in astropy have + # invalid unit. + assert not isinstance(val.unit, u.UnrecognizedUnit) + + +def test_copy(): + from ... import constants as const + cc = copy.deepcopy(const.c) + assert cc == const.c + + cc = copy.copy(const.c) + assert cc == const.c + + +def test_view(): + """Check that Constant and Quantity views can be taken (#3537, #3538).""" + from .. import c + c2 = c.view(Constant) + assert c2 == c + assert c2.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c2.uncertainty == 0 # c is a *defined* quantity + assert c2.name == c.name + assert c2.reference == c.reference + assert c2.unit == c.unit + + q1 = c.view(Q) + assert q1 == c + assert q1.value == c.value + assert type(q1) is Q + assert not hasattr(q1, 'reference') + + q2 = Q(c) + assert q2 == c + assert q2.value == c.value + assert type(q2) is Q + assert not hasattr(q2, 'reference') + + c3 = Q(c, subok=True) + assert c3 == c + assert c3.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c3.uncertainty == 0 # c is a *defined* quantity + assert c3.name == c.name + assert c3.reference == c.reference + assert c3.unit == c.unit + + c4 = Q(c, subok=True, copy=False) + assert c4 is c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6669d0e65d827d1e4c4a1eae4a6dd80d5cb4ff70 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/constants/tests/test_prior_version.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63699805cefbc400928c0942e2a13e5f408b85d8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.py @@ -0,0 +1,15 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .core import * +from .kernels import * +from .utils import discretize_model + +try: + # Not guaranteed available at setup time + from .convolve import convolve, convolve_fft, interpolate_replace_nans, convolve_models +except ImportError: + if not _ASTROPY_SETUP_: + raise diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96f53cfb1ff6cdaa315614a9fabdd945c9fecaad Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_extend.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_extend.so new file mode 100755 index 0000000000000000000000000000000000000000..a74d463eef6ebf9cdeffa23afda2e60005ee4cf3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_extend.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_fill.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_fill.so new file mode 100755 index 0000000000000000000000000000000000000000..70d0d4e3061583a82e7033ecc187966ed3732216 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_fill.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_none.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_none.so new file mode 100755 index 0000000000000000000000000000000000000000..89d24e86deae3fd3d96a682f4cdcb2c22462086a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_none.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_wrap.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_wrap.so new file mode 100755 index 0000000000000000000000000000000000000000..f26bedbc0fc34e85083988f83562ce56a6680e84 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/boundary_wrap.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.py new file mode 100644 index 0000000000000000000000000000000000000000..9429916f98e3004a6e067f0c8ae0a9a382ef2391 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.py @@ -0,0 +1,818 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import warnings + +import numpy as np +from functools import partial + +from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION +from ..utils.exceptions import AstropyUserWarning +from ..utils.console import human_file_size +from ..utils.decorators import deprecated_renamed_argument +from .. 
import units as u +from ..nddata import support_nddata +from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS +from ..modeling.core import _CompoundModelMeta + +from ..extern.six.moves import range, zip + + +# Disabling all doctests in this module until a better way of handling warnings +# in doctests can be determined +__doctest_skip__ = ['*'] + +BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend'] + + +@support_nddata(data='array') +def convolve(array, kernel, boundary='fill', fill_value=0., + nan_treatment='interpolate', normalize_kernel=True, mask=None, + preserve_nan=False, normalization_zero_tol=1e-8): + ''' + Convolve an array with a kernel. + + This routine differs from `scipy.ndimage.convolve` because + it includes a special treatment for ``NaN`` values. Rather than + including ``NaN`` values in the array in the convolution calculation, which + causes large ``NaN`` holes in the convolved array, ``NaN`` values are + replaced with interpolated values using the kernel as an interpolation + function. + + Parameters + ---------- + array : `numpy.ndarray` or `~astropy.nddata.NDData` + The array to convolve. This should be a 1, 2, or 3-dimensional array + or a list or a set of nested lists representing a 1, 2, or + 3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of + the `~astropy.nddata.NDData` will be used as the ``mask`` argument. + kernel : `numpy.ndarray` or `~astropy.convolution.Kernel` + The convolution kernel. The number of dimensions should match those for + the array, and the dimensions should be odd in all directions. If a + masked array, the masked values will be replaced by ``fill_value``. + boundary : str, optional + A flag indicating how to handle boundaries: + * `None` + Set the ``result`` values to zero where the kernel + extends beyond the edge of the array. + * 'fill' + Set values outside the array boundary to ``fill_value`` (default). + * 'wrap' + Periodic boundary that wrap to the other side of ``array``. + * 'extend' + Set values outside the array to the nearest ``array`` + value. + fill_value : float, optional + The value to use outside the array when using ``boundary='fill'`` + normalize_kernel : bool, optional + Whether to normalize the kernel to have a sum of one prior to + convolving + nan_treatment : 'interpolate', 'fill' + interpolate will result in renormalization of the kernel at each + position ignoring (pixels that are NaN in the image) in both the image + and the kernel. + 'fill' will replace the NaN pixels with a fixed numerical value (default + zero, see ``fill_value``) prior to convolution + Note that if the kernel has a sum equal to zero, NaN interpolation + is not possible and will raise an exception + preserve_nan : bool + After performing convolution, should pixels that were originally NaN + again become NaN? + mask : `None` or `numpy.ndarray` + A "mask" array. Shape must match ``array``, and anything that is masked + (i.e., not 0/`False`) will be set to NaN for the convolution. If + `None`, no masking will be performed unless ``array`` is a masked array. + If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is + masked of it is masked in either ``mask`` *or* ``array.mask``. + normalization_zero_tol: float, optional + The absolute tolerance on whether the kernel is different than zero. + If the kernel sums to zero to within this precision, it cannot be + normalized. Default is "1e-8". 
+ + Returns + ------- + result : `numpy.ndarray` + An array with the same dimensions and as the input array, + convolved with kernel. The data type depends on the input + array type. If array is a floating point type, then the + return array keeps the same data type, otherwise the type + is ``numpy.float``. + + Notes + ----- + For masked arrays, masked values are treated as NaNs. The convolution + is always done at ``numpy.float`` precision. + ''' + from .boundary_none import (convolve1d_boundary_none, + convolve2d_boundary_none, + convolve3d_boundary_none) + + from .boundary_extend import (convolve1d_boundary_extend, + convolve2d_boundary_extend, + convolve3d_boundary_extend) + + from .boundary_fill import (convolve1d_boundary_fill, + convolve2d_boundary_fill, + convolve3d_boundary_fill) + + from .boundary_wrap import (convolve1d_boundary_wrap, + convolve2d_boundary_wrap, + convolve3d_boundary_wrap) + + if boundary not in BOUNDARY_OPTIONS: + raise ValueError("Invalid boundary option: must be one of {0}" + .format(BOUNDARY_OPTIONS)) + + if nan_treatment not in ('interpolate', 'fill'): + raise ValueError("nan_treatment must be one of 'interpolate','fill'") + + # The cython routines all need float type inputs (so, a particular + # bit size, endianness, etc.). So we have to convert, which also + # has the effect of making copies so we don't modify the inputs. + # After this, the variables we work with will be array_internal, and + # kernel_internal. However -- we do want to keep track of what type + # the input array was so we can cast the result to that at the end + # if it's a floating point type. Don't bother with this for lists -- + # just always push those as np.float. + # It is always necessary to make a copy of kernel (since it is modified), + # but, if we just so happen to be lucky enough to have the input array + # have exactly the desired type, we just alias to array_internal + + # Check if kernel is kernel instance + if isinstance(kernel, Kernel): + # Check if array is also kernel instance, if so convolve and + # return new kernel instance + if isinstance(array, Kernel): + if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D): + new_array = convolve1d_boundary_fill(array.array, kernel.array, + 0, True) + new_kernel = Kernel1D(array=new_array) + elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D): + new_array = convolve2d_boundary_fill(array.array, kernel.array, + 0, True) + new_kernel = Kernel2D(array=new_array) + else: + raise Exception("Can't convolve 1D and 2D kernel.") + new_kernel._separable = kernel._separable and array._separable + new_kernel._is_bool = False + return new_kernel + kernel = kernel.array + + # Check that the arguments are lists or Numpy arrays + + if isinstance(array, list): + array_internal = np.array(array, dtype=np.float) + array_dtype = array_internal.dtype + elif isinstance(array, np.ndarray): + # Note this won't copy if it doesn't have to -- which is okay + # because none of what follows modifies array_internal. 
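For orientation, a minimal sketch of how the options above combine in practice, assuming only names exported by astropy.convolution as added in this file; the data values are arbitrary:

    import numpy as np
    from astropy.convolution import convolve, Gaussian1DKernel

    data = np.array([2.0, 1.0, np.nan, 3.0, 2.0, 1.5, np.nan, 1.0, 2.0, 2.5])
    kernel = Gaussian1DKernel(1)                  # Gaussian with stddev of 1 pixel
    smoothed = convolve(data, kernel,
                        boundary='extend',        # repeat edge values outside the array
                        nan_treatment='interpolate')
    # The two NaN samples are filled from their neighbours using the kernel as
    # the interpolation weight, so `smoothed` contains no NaNs.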
+ array_dtype = array.dtype + array_internal = array.astype(float, copy=False) + else: + raise TypeError("array should be a list or a Numpy array") + + if isinstance(kernel, list): + kernel_internal = np.array(kernel, dtype=float) + elif isinstance(kernel, np.ndarray): + # Note this always makes a copy, since we will be modifying it + kernel_internal = kernel.astype(float) + else: + raise TypeError("kernel should be a list or a Numpy array") + + # Check that the number of dimensions is compatible + if array_internal.ndim != kernel_internal.ndim: + raise Exception('array and kernel have differing number of ' + 'dimensions.') + + # anything that's masked must be turned into NaNs for the interpolation. + # This requires copying the array_internal + array_internal_copied = False + if np.ma.is_masked(array): + array_internal = array_internal.filled(np.nan) + array_internal_copied = True + if mask is not None: + if not array_internal_copied: + array_internal = array_internal.copy() + array_internal_copied = True + # mask != 0 yields a bool mask for all ints/floats/bool + array_internal[mask != 0] = np.nan + if np.ma.is_masked(kernel): + # *kernel* doesn't support NaN interpolation, so instead we just fill it + kernel_internal = kernel_internal.filled(fill_value) + + # Mark the NaN values so we can replace them later if interpolate_nan is + # not set + if preserve_nan: + badvals = np.isnan(array_internal) + + if nan_treatment == 'fill': + initially_nan = np.isnan(array_internal) + array_internal[initially_nan] = fill_value + + # Because the Cython routines have to normalize the kernel on the fly, we + # explicitly normalize the kernel here, and then scale the image at the + # end if normalization was not requested. + kernel_sum = kernel_internal.sum() + kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) + + if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel: + raise Exception("The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {0}" + .format(1. 
/ MAX_NORMALIZATION)) + + if not kernel_sums_to_zero: + kernel_internal /= kernel_sum + + renormalize_by_kernel = not kernel_sums_to_zero + + if array_internal.ndim == 0: + raise Exception("cannot convolve 0-dimensional arrays") + elif array_internal.ndim == 1: + if boundary == 'extend': + result = convolve1d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary == 'fill': + result = convolve1d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel) + elif boundary == 'wrap': + result = convolve1d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary is None: + result = convolve1d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel) + elif array_internal.ndim == 2: + if boundary == 'extend': + result = convolve2d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif boundary == 'fill': + result = convolve2d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel, + ) + elif boundary == 'wrap': + result = convolve2d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif boundary is None: + result = convolve2d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif array_internal.ndim == 3: + if boundary == 'extend': + result = convolve3d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary == 'fill': + result = convolve3d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel) + elif boundary == 'wrap': + result = convolve3d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary is None: + result = convolve3d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel) + else: + raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional ' + 'arrays at this time') + + # If normalization was not requested, we need to scale the array (since + # the kernel is effectively normalized within the cython functions) + if not normalize_kernel and not kernel_sums_to_zero: + result *= kernel_sum + + if preserve_nan: + result[badvals] = np.nan + + if nan_treatment == 'fill': + array_internal[initially_nan] = np.nan + + # Try to preserve the input type if it's a floating point type + if array_dtype.kind == 'f': + # Avoid making another copy if possible + try: + return result.astype(array_dtype, copy=False) + except TypeError: + return result.astype(array_dtype) + else: + return result + + +@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0') +@support_nddata(data='array') +def convolve_fft(array, kernel, boundary='fill', fill_value=0., + nan_treatment='interpolate', normalize_kernel=True, + normalization_zero_tol=1e-8, + preserve_nan=False, mask=None, crop=True, return_fft=False, + fft_pad=None, psf_pad=None, quiet=False, + min_wt=0.0, allow_huge=False, + fftn=np.fft.fftn, ifftn=np.fft.ifftn, + complex_dtype=np.complex): + """ + Convolve an ndarray with an nd-kernel. Returns a convolved image with + ``shape = array.shape``. Assumes kernel is centered. + + `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` + values in the original image with interpolated values using the kernel as + an interpolation function. However, it also includes many additional + options specific to the implementation. 
+ + `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways: + + * It can treat ``NaN`` values as zeros or interpolate over them. + * ``inf`` values are treated as ``NaN`` + * (optionally) It pads to the nearest 2^n size to improve FFT speed. + * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned) + * It lets you use your own fft, e.g., + `pyFFTW `_ or + `pyFFTW3 `_ , which can lead to + performance improvements, depending on your system configuration. pyFFTW3 + is threaded, and therefore may yield significant performance benefits on + multi-core machines at the cost of greater memory requirements. Specify + the ``fftn`` and ``ifftn`` keywords to override the default, which is + `numpy.fft.fft` and `numpy.fft.ifft`. + + Parameters + ---------- + array : `numpy.ndarray` + Array to be convolved with ``kernel``. It can be of any + dimensionality, though only 1, 2, and 3d arrays have been tested. + kernel : `numpy.ndarray` or `astropy.convolution.Kernel` + The convolution kernel. The number of dimensions should match those + for the array. The dimensions *do not* have to be odd in all directions, + unlike in the non-fft `convolve` function. The kernel will be + normalized if ``normalize_kernel`` is set. It is assumed to be centered + (i.e., shifts may result if your kernel is asymmetric) + boundary : {'fill', 'wrap'}, optional + A flag indicating how to handle boundaries: + + * 'fill': set values outside the array boundary to fill_value + (default) + * 'wrap': periodic boundary + + The `None` and 'extend' parameters are not supported for FFT-based + convolution + fill_value : float, optional + The value to use outside the array when using boundary='fill' + nan_treatment : 'interpolate', 'fill' + ``interpolate`` will result in renormalization of the kernel at each + position ignoring (pixels that are NaN in the image) in both the image + and the kernel. ``fill`` will replace the NaN pixels with a fixed + numerical value (default zero, see ``fill_value``) prior to + convolution. Note that if the kernel has a sum equal to zero, NaN + interpolation is not possible and will raise an exception. + normalize_kernel : function or boolean, optional + If specified, this is the function to divide kernel by to normalize it. + e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be: + ``kernel = kernel / np.sum(kernel)``. If True, defaults to + ``normalize_kernel = np.sum``. + normalization_zero_tol: float, optional + The absolute tolerance on whether the kernel is different than zero. + If the kernel sums to zero to within this precision, it cannot be + normalized. Default is "1e-8". + preserve_nan : bool + After performing convolution, should pixels that were originally NaN + again become NaN? + mask : `None` or `numpy.ndarray` + A "mask" array. Shape must match ``array``, and anything that is masked + (i.e., not 0/`False`) will be set to NaN for the convolution. If + `None`, no masking will be performed unless ``array`` is a masked array. + If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is + masked of it is masked in either ``mask`` *or* ``array.mask``. + + + Other Parameters + ---------------- + min_wt : float, optional + If ignoring ``NaN`` / zeros, force all grid points with a weight less than + this value to ``NaN`` (the weight of a grid point with *no* ignored + neighbors is 1.0). + If ``min_wt`` is zero, then all zero-weight points will be set to zero + instead of ``NaN`` (which they would be otherwise, because 1/0 = nan). 
+ See the examples below + fft_pad : bool, optional + Default on. Zero-pad image to the nearest 2^n. With + ``boundary='wrap'``, this will be disabled. + psf_pad : bool, optional + Zero-pad image to be at least the sum of the image sizes to avoid + edge-wrapping when smoothing. This is enabled by default with + ``boundary='fill'``, but it can be overridden with a boolean option. + ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. + crop : bool, optional + Default on. Return an image of the size of the larger of the input + image and the kernel. + If the image and kernel are asymmetric in opposite directions, will + return the largest image in both directions. + For example, if an input image has shape [100,3] but a kernel with shape + [6,6] is used, the output will be [100,6]. + return_fft : bool, optional + Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is + ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. + fftn, ifftn : functions, optional + The fft and inverse fft functions. Can be overridden to use your own + ffts, e.g. an fftw3 wrapper or scipy's fftn, + ``fft=scipy.fftpack.fftn`` + complex_dtype : numpy.complex, optional + Which complex dtype to use. `numpy` has a range of options, from 64 to + 256. + quiet : bool, optional + Silence warning message about NaN interpolation + allow_huge : bool, optional + Allow huge arrays in the FFT? If False, will raise an exception if the + array or kernel size is >1 GB + + Raises + ------ + ValueError: + If the array is bigger than 1 GB after padding, will raise this exception + unless ``allow_huge`` is True + + See Also + -------- + convolve: + Convolve is a non-fft version of this code. It is more memory + efficient and for small kernels can be faster. + + Returns + ------- + default : ndarray + ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns + ``fft(array) * fft(kernel)``. If crop is not set, returns the + image, but with the fft-padded size instead of the input size + + Notes + ----- + With ``psf_pad=True`` and a large PSF, the resulting data can become + very large and consume a lot of memory. See Issue + https://github.com/astropy/astropy/pull/4366 for further detail. + + Examples + -------- + >>> convolve_fft([1, 0, 3], [1, 1, 1]) + array([ 1., 4., 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) + array([ 1., 4., 3.]) + + >>> convolve_fft([1, 0, 3], [0, 1, 0]) + array([ 1., 0., 3.]) + + >>> convolve_fft([1, 2, 3], [1]) + array([ 1., 2., 3.]) + + >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') + ... + array([ 1., 0., 3.]) + + >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', + ... min_wt=1e-8) + array([ 1., nan, 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') + array([ 1., 4., 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', + ... normalize_kernel=True) + array([ 1., 2., 3.]) + + >>> import scipy.fftpack # optional - requires scipy + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', + ... normalize_kernel=True, + ... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft) + array([ 1., 2., 3.]) + + """ + # Checking copied from convolve.py - however, since FFTs have real & + # complex components, we change the types. Only the real part will be + # returned! Note that this always makes a copy. 
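A short sketch of a typical convolve_fft call, assuming only the public astropy.convolution names defined in this file; the image is random data for illustration and the keyword values mirror the behaviour documented above:

    import numpy as np
    from astropy.convolution import convolve_fft, Gaussian2DKernel

    image = np.random.randn(256, 256)
    image[40:45, 60:65] = np.nan                # a small blanked-out region
    kernel = Gaussian2DKernel(3)                # stddev of 3 pixels
    result = convolve_fft(image, kernel,
                          boundary='fill', fill_value=0.,
                          nan_treatment='interpolate',
                          psf_pad=True,         # pad so the kernel cannot wrap around
                          allow_huge=False)     # raise instead of allocating > 1 GB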
+ + # Check kernel is kernel instance + if isinstance(kernel, Kernel): + kernel = kernel.array + if isinstance(array, Kernel): + raise TypeError("Can't convolve two kernels with convolve_fft. " + "Use convolve instead.") + + if nan_treatment not in ('interpolate', 'fill'): + raise ValueError("nan_treatment must be one of 'interpolate','fill'") + + # Convert array dtype to complex + # and ensure that list inputs become arrays + array = np.asarray(array, dtype=np.complex) + kernel = np.asarray(kernel, dtype=np.complex) + + # Check that the number of dimensions is compatible + if array.ndim != kernel.ndim: + raise ValueError("Image and kernel must have same number of " + "dimensions") + + arrayshape = array.shape + kernshape = kernel.shape + + array_size_B = (np.product(arrayshape, dtype=np.int64) * + np.dtype(complex_dtype).itemsize)*u.byte + if array_size_B > 1*u.GB and not allow_huge: + raise ValueError("Size Error: Arrays will be {}. Use " + "allow_huge=True to override this exception." + .format(human_file_size(array_size_B.to_value(u.byte)))) + + # mask catching - masks must be turned into NaNs for use later in the image + if np.ma.is_masked(array): + mamask = array.mask + array = np.array(array) + array[mamask] = np.nan + elif mask is not None: + # copying here because we have to mask it below. But no need to copy + # if mask is None because we won't modify it. + array = np.array(array) + if mask is not None: + # mask != 0 yields a bool mask for all ints/floats/bool + array[mask != 0] = np.nan + # the *kernel* doesn't support NaN interpolation, so instead we just fill it + if np.ma.is_masked(kernel): + kernel = kernel.filled(0) + + # NaN and inf catching + nanmaskarray = np.isnan(array) | np.isinf(array) + array[nanmaskarray] = 0 + nanmaskkernel = np.isnan(kernel) | np.isinf(kernel) + kernel[nanmaskkernel] = 0 + + if normalize_kernel is True: + if kernel.sum() < 1. / MAX_NORMALIZATION: + raise Exception("The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {0}" + .format(1. / MAX_NORMALIZATION)) + kernel_scale = kernel.sum() + normalized_kernel = kernel / kernel_scale + kernel_scale = 1 # if we want to normalize it, leave it normed! + elif normalize_kernel: + # try this. If a function is not passed, the code will just crash... I + # think type checking would be better but PEPs say otherwise... + kernel_scale = normalize_kernel(kernel) + normalized_kernel = kernel / kernel_scale + else: + kernel_scale = kernel.sum() + if np.abs(kernel_scale) < normalization_zero_tol: + if nan_treatment == 'interpolate': + raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel') + else: + # the kernel's sum is near-zero, so it can't be scaled + kernel_scale = 1 + normalized_kernel = kernel + else: + # the kernel is normalizable; we'll temporarily normalize it + # now and undo the normalization later. + normalized_kernel = kernel / kernel_scale + + if boundary is None: + warnings.warn("The convolve_fft version of boundary=None is " + "equivalent to the convolve boundary='fill'. 
There is " + "no FFT equivalent to convolve's " + "zero-if-kernel-leaves-boundary", AstropyUserWarning) + if psf_pad is None: + psf_pad = True + if fft_pad is None: + fft_pad = True + elif boundary == 'fill': + # create a boundary region at least as large as the kernel + if psf_pad is False: + warnings.warn("psf_pad was set to {0}, which overrides the " + "boundary='fill' setting.".format(psf_pad), + AstropyUserWarning) + else: + psf_pad = True + if fft_pad is None: + # default is 'True' according to the docstring + fft_pad = True + elif boundary == 'wrap': + if psf_pad: + raise ValueError("With boundary='wrap', psf_pad cannot be enabled.") + psf_pad = False + if fft_pad: + raise ValueError("With boundary='wrap', fft_pad cannot be enabled.") + fft_pad = False + fill_value = 0 # force zero; it should not be used + elif boundary == 'extend': + raise NotImplementedError("The 'extend' option is not implemented " + "for fft-based convolution") + + # find ideal size (power of 2) for fft. + # Can add shapes because they are tuples + if fft_pad: # default=True + if psf_pad: # default=False + # add the dimensions and then take the max (bigger) + fsize = 2 ** np.ceil(np.log2( + np.max(np.array(arrayshape) + np.array(kernshape)))) + else: + # add the shape lists (max of a list of length 4) (smaller) + # also makes the shapes square + fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape))) + newshape = np.array([fsize for ii in range(array.ndim)], dtype=int) + else: + if psf_pad: + # just add the biggest dimensions + newshape = np.array(arrayshape) + np.array(kernshape) + else: + newshape = np.array([np.max([imsh, kernsh]) + for imsh, kernsh in zip(arrayshape, kernshape)]) + + # perform a second check after padding + array_size_C = (np.product(newshape, dtype=np.int64) * + np.dtype(complex_dtype).itemsize)*u.byte + if array_size_C > 1*u.GB and not allow_huge: + raise ValueError("Size Error: Arrays will be {}. Use " + "allow_huge=True to override this exception." + .format(human_file_size(array_size_C))) + + # For future reference, this can be used to predict "almost exactly" + # how much *additional* memory will be used. + # size * (array + kernel + kernelfft + arrayfft + + # (kernel*array)fft + + # optional(weight image + weight_fft + weight_ifft) + + # optional(returned_fft)) + # total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize + # * (5 + 3*((interpolate_nan or ) and kernel_is_normalized)) + # + (1 + (not return_fft)) * + # np.product(arrayshape)*np.dtype(complex_dtype).itemsize + # + np.product(arrayshape)*np.dtype(bool).itemsize + # + np.product(kernshape)*np.dtype(bool).itemsize) + # ) / 1024.**3 + + # separate each dimension by the padding size... 
this is to determine the + # appropriate slice size to get back to the input dimensions + arrayslices = [] + kernslices = [] + for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)): + center = newdimsize - (newdimsize + 1) // 2 + arrayslices += [slice(center - arraydimsize // 2, + center + (arraydimsize + 1) // 2)] + kernslices += [slice(center - kerndimsize // 2, + center + (kerndimsize + 1) // 2)] + arrayslices = tuple(arrayslices) + kernslices = tuple(kernslices) + + if not np.all(newshape == arrayshape): + if np.isfinite(fill_value): + bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value + else: + bigarray = np.zeros(newshape, dtype=complex_dtype) + bigarray[arrayslices] = array + else: + bigarray = array + + if not np.all(newshape == kernshape): + bigkernel = np.zeros(newshape, dtype=complex_dtype) + bigkernel[kernslices] = normalized_kernel + else: + bigkernel = normalized_kernel + + arrayfft = fftn(bigarray) + # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity + kernfft = fftn(np.fft.ifftshift(bigkernel)) + fftmult = arrayfft * kernfft + + interpolate_nan = (nan_treatment == 'interpolate') + if interpolate_nan: + if not np.isfinite(fill_value): + bigimwt = np.zeros(newshape, dtype=complex_dtype) + else: + bigimwt = np.ones(newshape, dtype=complex_dtype) + + bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan + wtfft = fftn(bigimwt) + + # You can only get to this point if kernel_is_normalized + wtfftmult = wtfft * kernfft + wtsm = ifftn(wtfftmult) + # need to re-zero weights outside of the image (if it is padded, we + # still don't weight those regions) + bigimwt[arrayslices] = wtsm.real[arrayslices] + else: + bigimwt = 1 + + if np.isnan(fftmult).any(): + # this check should be unnecessary; call it an insanity check + raise ValueError("Encountered NaNs in convolve. This is disallowed.") + + # restore NaNs in original image (they were modified inplace earlier) + # We don't have to worry about masked arrays - if input was masked, it was + # copied + array[nanmaskarray] = np.nan + kernel[nanmaskkernel] = np.nan + + fftmult *= kernel_scale + + if return_fft: + return fftmult + + if interpolate_nan: + rifft = (ifftn(fftmult)) / bigimwt + if not np.isscalar(bigimwt): + if min_wt > 0.: + rifft[bigimwt < min_wt] = np.nan + else: + # Set anything with no weight to zero (taking into account + # slight offsets due to floating-point errors). + rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0 + else: + rifft = ifftn(fftmult) + + if preserve_nan: + rifft[arrayslices][nanmaskarray] = np.nan + + if crop: + result = rifft[arrayslices].real + return result + else: + return rifft.real + + +def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs): + """ + Given a data set containing NaNs, replace the NaNs by interpolating from + neighboring data points with a given kernel. + + Parameters + ---------- + array : `numpy.ndarray` + Array to be convolved with ``kernel``. It can be of any + dimensionality, though only 1, 2, and 3d arrays have been tested. + kernel : `numpy.ndarray` or `astropy.convolution.Kernel` + The convolution kernel. The number of dimensions should match those + for the array. The dimensions *do not* have to be odd in all directions, + unlike in the non-fft `convolve` function. The kernel will be + normalized if ``normalize_kernel`` is set. It is assumed to be centered + (i.e., shifts may result if your kernel is asymmetric). 
The kernel + *must be normalizable* (i.e., its sum cannot be zero). + convolve : `convolve` or `convolve_fft` + One of the two convolution functions defined in this package. + + Returns + ------- + newarray : `numpy.ndarray` + A copy of the original array with NaN pixels replaced with their + interpolated counterparts + """ + + if not np.any(np.isnan(array)): + return array.copy() + + newarray = array.copy() + + convolved = convolve(array, kernel, nan_treatment='interpolate', + normalize_kernel=True, **kwargs) + + isnan = np.isnan(array) + newarray[isnan] = convolved[isnan] + + return newarray + + +def convolve_models(model, kernel, mode='convolve_fft', **kwargs): + """ + Convolve two models using `~astropy.convolution.convolve_fft`. + + Parameters + ---------- + model : `~astropy.modeling.core.Model` + Functional model + kernel : `~astropy.modeling.core.Model` + Convolution kernel + mode : str + Keyword representing which function to use for convolution. + * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function. + * 'convolve' : use `~astropy.convolution.convolve`. + kwargs : dict + Keyword arguments to me passed either to `~astropy.convolution.convolve` + or `~astropy.convolution.convolve_fft` depending on ``mode``. + + Returns + ------- + default : CompoundModel + Convolved model + """ + + if mode == 'convolve_fft': + BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs)) + elif mode == 'convolve': + BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs)) + else: + raise ValueError('Mode {} is not supported.'.format(mode)) + + return _CompoundModelMeta._from_operator(mode, model, kernel) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca5fba0121d1396e7ed7bbdaa825db8825814782 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/convolve.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.py new file mode 100644 index 0000000000000000000000000000000000000000..632e324d70d7c6fbff0d6b6bff16a97c76a92ca2 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.py @@ -0,0 +1,372 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module contains the convolution and filter functionalities of astropy. + +A few conceptual notes: +A filter kernel is mainly characterized by its response function. In the 1D +case we speak of "impulse response function", in the 2D case we call it "point +spread function". This response function is given for every kernel by an +astropy `FittableModel`, which is evaluated on a grid to obtain a filter array, +which can then be applied to binned data. + +The model is centered on the array and should have an amplitude such that the array +integrates to one per default. + +Currently only symmetric 2D kernels are supported. 
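A minimal sketch of interpolate_replace_nans, assuming the public astropy.convolution names added above; the array is toy data, and only its NaN pixel is replaced, exactly as the docstring describes:

    import numpy as np
    from astropy.convolution import interpolate_replace_nans, Gaussian2DKernel

    img = np.ones((32, 32))
    img[10, 12] = np.nan
    fixed = interpolate_replace_nans(img, Gaussian2DKernel(1))
    # Finite pixels are copied through unchanged; the NaN pixel is filled from
    # its neighbours, so the result is all-finite.
    assert np.isfinite(fixed).all()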
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import warnings +import copy + +import numpy as np +from ..utils.exceptions import AstropyUserWarning +from .utils import (discretize_model, add_kernel_arrays_1D, + add_kernel_arrays_2D) + +MAX_NORMALIZATION = 100 + +__all__ = ['Kernel', 'Kernel1D', 'Kernel2D', 'kernel_arithmetics'] + + +class Kernel(object): + """ + Convolution kernel base class. + + Parameters + ---------- + array : `~numpy.ndarray` + Kernel array. + """ + _separable = False + _is_bool = True + _model = None + + def __init__(self, array): + self._array = np.asanyarray(array) + + @property + def truncation(self): + """ + Deviation from the normalization to one. + """ + return self._truncation + + @property + def is_bool(self): + """ + Indicates if kernel is bool. + + If the kernel is bool the multiplication in the convolution could + be omitted, to increase the performance. + """ + return self._is_bool + + @property + def model(self): + """ + Kernel response model. + """ + return self._model + + @property + def dimension(self): + """ + Kernel dimension. + """ + return self.array.ndim + + @property + def center(self): + """ + Index of the kernel center. + """ + return [axes_size // 2 for axes_size in self._array.shape] + + def normalize(self, mode='integral'): + """ + Normalize the filter kernel. + + Parameters + ---------- + mode : {'integral', 'peak'} + One of the following modes: + * 'integral' (default) + Kernel is normalized such that its integral = 1. + * 'peak' + Kernel is normalized such that its peak = 1. + """ + + if mode == 'integral': + normalization = self._array.sum() + elif mode == 'peak': + normalization = self._array.max() + else: + raise ValueError("invalid mode, must be 'integral' or 'peak'") + + # Warn the user for kernels that sum to zero + if normalization == 0: + warnings.warn('The kernel cannot be normalized because it ' + 'sums to zero.', AstropyUserWarning) + else: + np.divide(self._array, normalization, self._array) + + self._kernel_sum = self._array.sum() + + @property + def shape(self): + """ + Shape of the kernel array. + """ + return self._array.shape + + @property + def separable(self): + """ + Indicates if the filter kernel is separable. + + A 2D filter is separable, when its filter array can be written as the + outer product of two 1D arrays. + + If a filter kernel is separable, higher dimension convolutions will be + performed by applying the 1D filter array consecutively on every dimension. + This is significantly faster, than using a filter array with the same + dimension. + """ + return self._separable + + @property + def array(self): + """ + Filter kernel array. + """ + return self._array + + def __add__(self, kernel): + """ + Add two filter kernels. + """ + return kernel_arithmetics(self, kernel, 'add') + + def __sub__(self, kernel): + """ + Subtract two filter kernels. + """ + return kernel_arithmetics(self, kernel, 'sub') + + def __mul__(self, value): + """ + Multiply kernel with number or convolve two kernels. + """ + return kernel_arithmetics(self, value, "mul") + + def __rmul__(self, value): + """ + Multiply kernel with number or convolve two kernels. + """ + return kernel_arithmetics(self, value, "mul") + + def __array__(self): + """ + Array representation of the kernel. + """ + return self._array + + def __array_wrap__(self, array, context=None): + """ + Wrapper for multiplication with numpy arrays. 
+ """ + if type(context[0]) == np.ufunc: + return NotImplemented + else: + return array + + +class Kernel1D(Kernel): + """ + Base class for 1D filter kernels. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` + Model to be evaluated. + x_size : odd int, optional + Size of the kernel array. Default = 8 * width. + array : `~numpy.ndarray` + Kernel array. + width : number + Width of the filter kernel. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + """ + + def __init__(self, model=None, x_size=None, array=None, **kwargs): + # Initialize from model + if array is None: + if self._model is None: + raise TypeError("Must specify either array or model.") + + if x_size is None: + x_size = self._default_size + elif x_size != int(x_size): + raise TypeError("x_size should be an integer") + + # Set ranges where to evaluate the model + + if x_size % 2 == 0: # even kernel + x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) + else: # odd kernel + x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) + + array = discretize_model(self._model, x_range, **kwargs) + + # Initialize from array + elif array is not None: + self._model = None + + super(Kernel1D, self).__init__(array) + + +class Kernel2D(Kernel): + """ + Base class for 2D filter kernels. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` + Model to be evaluated. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + array : `~numpy.ndarray` + Kernel array. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + width : number + Width of the filter kernel. + factor : number, optional + Factor of oversampling. Default factor = 10. 
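The Kernel base-class machinery above (shape, center, normalize) can be exercised with any concrete kernel; a small sketch using Box2DKernel, which this package also provides:

    from astropy.convolution import Box2DKernel

    k = Box2DKernel(5)            # a flat 5x5 kernel built from a Box2D model
    print(k.shape)                # (5, 5)
    print(k.center)               # [2, 2]
    k.normalize(mode='peak')      # rescale so the maximum array value is 1
    k.normalize(mode='integral')  # rescale back so the array sums to 1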
+ """ + + def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs): + + # Initialize from model + if array is None: + if self._model is None: + raise TypeError("Must specify either array or model.") + + if x_size is None: + x_size = self._default_size + elif x_size != int(x_size): + raise TypeError("x_size should be an integer") + + if y_size is None: + y_size = x_size + elif y_size != int(y_size): + raise TypeError("y_size should be an integer") + + # Set ranges where to evaluate the model + + if x_size % 2 == 0: # even kernel + x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) + else: # odd kernel + x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) + + if y_size % 2 == 0: # even kernel + y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5) + else: # odd kernel + y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1) + + array = discretize_model(self._model, x_range, y_range, **kwargs) + + # Initialize from array + elif array is not None: + self._model = None + + super(Kernel2D, self).__init__(array) + + +def kernel_arithmetics(kernel, value, operation): + """ + Add, subtract or multiply two kernels. + + Parameters + ---------- + kernel : `astropy.convolution.Kernel` + Kernel instance + value : kernel, float or int + Value to operate with + operation : {'add', 'sub', 'mul'} + One of the following operations: + * 'add' + Add two kernels + * 'sub' + Subtract two kernels + * 'mul' + Multiply kernel with number or convolve two kernels. + """ + # 1D kernels + if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D): + if operation == "add": + new_array = add_kernel_arrays_1D(kernel.array, value.array) + if operation == "sub": + new_array = add_kernel_arrays_1D(kernel.array, -value.array) + if operation == "mul": + raise Exception("Kernel operation not supported. Maybe you want " + "to use convolve(kernel1, kernel2) instead.") + new_kernel = Kernel1D(array=new_array) + new_kernel._separable = kernel._separable and value._separable + new_kernel._is_bool = kernel._is_bool or value._is_bool + + # 2D kernels + elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D): + if operation == "add": + new_array = add_kernel_arrays_2D(kernel.array, value.array) + if operation == "sub": + new_array = add_kernel_arrays_2D(kernel.array, -value.array) + if operation == "mul": + raise Exception("Kernel operation not supported. 
Maybe you want " + "to use convolve(kernel1, kernel2) instead.") + new_kernel = Kernel2D(array=new_array) + new_kernel._separable = kernel._separable and value._separable + new_kernel._is_bool = kernel._is_bool or value._is_bool + + # kernel and number + elif ((isinstance(kernel, Kernel1D) or isinstance(kernel, Kernel2D)) + and np.isscalar(value)): + if operation == "mul": + new_kernel = copy.copy(kernel) + new_kernel._array *= value + else: + raise Exception("Kernel operation not supported.") + else: + raise Exception("Kernel operation not supported.") + return new_kernel diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1628e103ec3d8bcba20c3c9ac50e693b57f368b5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/core.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..d9c9a187defe3d5ccda194c48a1e791a74c8391f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.py @@ -0,0 +1,1018 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math + +import numpy as np + +from .core import Kernel1D, Kernel2D, Kernel +from .utils import KernelSizeError +from ..modeling import models +from ..modeling.core import Fittable1DModel, Fittable2DModel + + +__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel', + 'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel', + 'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel', + 'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel', + 'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel'] + + +def _round_up_to_odd_integer(value): + i = int(math.ceil(value)) # TODO: int() call is only needed for six.PY2 + if i % 2 == 0: + return i + 1 + else: + return i + + +class Gaussian1DKernel(Kernel1D): + """ + 1D Gaussian filter kernel. + + The Gaussian filter is a filter with great smoothing properties. It is + isotropic and does not produce artifacts. + + Parameters + ---------- + stddev : number + Standard deviation of the Gaussian kernel. + x_size : odd int, optional + Size of the kernel array. Default = 8 * stddev + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. Very slow. + factor : number, optional + Factor of oversampling. Default factor = 10. If the factor + is too large, evaluation can be very slow. + + + See Also + -------- + Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel + + + Examples + -------- + Kernel response: + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Gaussian1DKernel + gauss_1D_kernel = Gaussian1DKernel(10) + plt.plot(gauss_1D_kernel, drawstyle='steps') + plt.xlabel('x [pixels]') + plt.ylabel('value') + plt.show() + """ + _separable = True + _is_bool = False + + def __init__(self, stddev, **kwargs): + self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev), + 0, stddev) + self._default_size = _round_up_to_odd_integer(8 * stddev) + super(Gaussian1DKernel, self).__init__(**kwargs) + self._truncation = np.abs(1. - self._array.sum()) + + +class Gaussian2DKernel(Kernel2D): + """ + 2D Gaussian filter kernel. + + The Gaussian filter is a filter with great smoothing properties. It is + isotropic and does not produce artifacts. + + Parameters + ---------- + stddev : number + Standard deviation of the Gaussian kernel. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * stddev. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * stddev. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Gaussian2DKernel + gaussian_2D_kernel = Gaussian2DKernel(10) + plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + + """ + _separable = True + _is_bool = False + + def __init__(self, stddev, **kwargs): + self._model = models.Gaussian2D(1. / (2 * np.pi * stddev ** 2), 0, + 0, stddev, stddev) + self._default_size = _round_up_to_odd_integer(8 * stddev) + super(Gaussian2DKernel, self).__init__(**kwargs) + self._truncation = np.abs(1. - self._array.sum()) + + +class Box1DKernel(Kernel1D): + """ + 1D Box filter kernel. + + The Box filter or running mean is a smoothing filter. It is not isotropic + and can produce artifacts, when applied repeatedly to the same data. + + By default the Box kernel uses the ``linear_interp`` discretization mode, + which allows non-shifting, even-sized kernels. This is achieved by + weighting the edge pixels with 1/2. E.g a Box kernel with an effective + smoothing of 4 pixel would have the following array: [0.5, 1, 1, 1, 0.5]. + + + Parameters + ---------- + width : number + Width of the filter kernel. + mode : str, optional + One of the following discretization modes: + * 'center' + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' (default) + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. 
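The Gaussian kernels above also support the arithmetic defined in core.py through the +, - and * operators; a brief sketch (the two kernels have different sizes, so the smaller array is zero-padded before the sum):

    from astropy.convolution import Gaussian1DKernel

    narrow = Gaussian1DKernel(2)
    wide = Gaussian1DKernel(5)
    combined = narrow + wide      # element-wise sum of the (padded) kernel arrays
    scaled = 0.5 * narrow         # multiplying by a scalar rescales the array
    # Convolving two kernels is spelled convolve(kernel1, kernel2), not '*'.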
+ + See Also + -------- + Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel + + + Examples + -------- + Kernel response function: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Box1DKernel + box_1D_kernel = Box1DKernel(9) + plt.plot(box_1D_kernel, drawstyle='steps') + plt.xlim(-1, 9) + plt.xlabel('x [pixels]') + plt.ylabel('value') + plt.show() + + """ + _separable = True + _is_bool = True + + def __init__(self, width, **kwargs): + self._model = models.Box1D(1. / width, 0, width) + self._default_size = _round_up_to_odd_integer(width) + kwargs['mode'] = 'linear_interp' + super(Box1DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class Box2DKernel(Kernel2D): + """ + 2D Box filter kernel. + + The Box filter or running mean is a smoothing filter. It is not isotropic + and can produce artifact, when applied repeatedly to the same data. + + By default the Box kernel uses the ``linear_interp`` discretization mode, + which allows non-shifting, even-sized kernels. This is achieved by + weighting the edge pixels with 1/2. + + + Parameters + ---------- + width : number + Width of the filter kernel. + mode : str, optional + One of the following discretization modes: + * 'center' + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' (default) + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Box2DKernel + box_2D_kernel = Box2DKernel(9) + plt.imshow(box_2D_kernel, interpolation='none', origin='lower', + vmin=0.0, vmax=0.015) + plt.xlim(-1, 9) + plt.ylim(-1, 9) + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _separable = True + _is_bool = True + + def __init__(self, width, **kwargs): + self._model = models.Box2D(1. / width ** 2, 0, 0, width, width) + self._default_size = _round_up_to_odd_integer(width) + kwargs['mode'] = 'linear_interp' + super(Box2DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class Tophat2DKernel(Kernel2D): + """ + 2D Tophat filter kernel. + + The Tophat filter is an isotropic smoothing filter. It can produce + artifacts when applied repeatedly on the same data. + + Parameters + ---------- + radius : int + Radius of the filter kernel. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. 
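Returning briefly to the Box kernels above: the even-width behaviour of the 'linear_interp' mode is easy to check directly; the printed array is the normalized form of the [0.5, 1, 1, 1, 0.5] weights described there:

    from astropy.convolution import Box1DKernel

    k = Box1DKernel(4)    # an even width is allowed thanks to 'linear_interp'
    print(k.array)        # [0.125, 0.25, 0.25, 0.25, 0.125]: half-weight edges, sum 1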
+ + + See Also + -------- + Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Tophat2DKernel + tophat_2D_kernel = Tophat2DKernel(40) + plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + + """ + def __init__(self, radius, **kwargs): + self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius) + self._default_size = _round_up_to_odd_integer(2 * radius) + super(Tophat2DKernel, self).__init__(**kwargs) + self._truncation = 0 + + +class Ring2DKernel(Kernel2D): + """ + 2D Ring filter kernel. + + The Ring filter kernel is the difference between two Tophat kernels of + different width. This kernel is useful for, e.g., background estimation. + + Parameters + ---------- + radius_in : number + Inner radius of the ring kernel. + width : number + Width of the ring kernel. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Ring2DKernel + ring_2D_kernel = Ring2DKernel(9, 8) + plt.imshow(ring_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + def __init__(self, radius_in, width, **kwargs): + radius_out = radius_in + width + self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)), + 0, 0, radius_in, width) + self._default_size = _round_up_to_odd_integer(2 * radius_out) + super(Ring2DKernel, self).__init__(**kwargs) + self._truncation = 0 + + +class Trapezoid1DKernel(Kernel1D): + """ + 1D trapezoid kernel. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the width of the constant part, + before it begins to slope down. + slope : number + Slope of the filter kernel's tails + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel + + Examples + -------- + Kernel response: + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Trapezoid1DKernel + trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) + plt.plot(trapezoid_1D_kernel, drawstyle='steps') + plt.xlabel('x [pixels]') + plt.ylabel('amplitude') + plt.xlim(-1, 28) + plt.show() + """ + _is_bool = False + + def __init__(self, width, slope=1., **kwargs): + self._model = models.Trapezoid1D(1, 0, width, slope) + self._default_size = _round_up_to_odd_integer(width + 2. / slope) + super(Trapezoid1DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class TrapezoidDisk2DKernel(Kernel2D): + """ + 2D trapezoid kernel. + + Parameters + ---------- + radius : number + Width of the filter kernel, defined as the width of the constant part, + before it begins to slope down. + slope : number + Slope of the filter kernel's tails + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import TrapezoidDisk2DKernel + trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) + plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + + """ + _is_bool = False + + def __init__(self, radius, slope=1., **kwargs): + self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) + self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope) + super(TrapezoidDisk2DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class MexicanHat1DKernel(Kernel1D): + """ + 1D Mexican hat filter kernel. + + The Mexican Hat, or inverted Gaussian-Laplace filter, is a + bandpass filter. It smoothes the data and removes slowly varying + or constant structures (e.g. Background). It is useful for peak or + multi-scale detection. + + This kernel is derived from a normalized Gaussian function, by + computing the second derivative. This results in an amplitude + at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The + normalization is the same as for `scipy.ndimage.gaussian_laplace`, + except for a minus sign. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the standard deviation + of the Gaussian function from which it is derived. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. 
+ factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import MexicanHat1DKernel + mexicanhat_1D_kernel = MexicanHat1DKernel(10) + plt.plot(mexicanhat_1D_kernel, drawstyle='steps') + plt.xlabel('x [pixels]') + plt.ylabel('value') + plt.show() + + """ + _is_bool = True + + def __init__(self, width, **kwargs): + amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3) + self._model = models.MexicanHat1D(amplitude, 0, width) + self._default_size = _round_up_to_odd_integer(8 * width) + super(MexicanHat1DKernel, self).__init__(**kwargs) + self._truncation = np.abs(self._array.sum() / self._array.size) + + +class MexicanHat2DKernel(Kernel2D): + """ + 2D Mexican hat filter kernel. + + The Mexican Hat, or inverted Gaussian-Laplace filter, is a + bandpass filter. It smoothes the data and removes slowly varying + or constant structures (e.g. Background). It is useful for peak or + multi-scale detection. + + This kernel is derived from a normalized Gaussian function, by + computing the second derivative. This results in an amplitude + at the kernels center of 1. / (pi * width ** 4). The normalization + is the same as for `scipy.ndimage.gaussian_laplace`, except + for a minus sign. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the standard deviation + of the Gaussian function from which it is derived. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import MexicanHat2DKernel + mexicanhat_2D_kernel = MexicanHat2DKernel(10) + plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, width, **kwargs): + amplitude = 1.0 / (np.pi * width ** 4) + self._model = models.MexicanHat2D(amplitude, 0, 0, width) + self._default_size = _round_up_to_odd_integer(8 * width) + super(MexicanHat2DKernel, self).__init__(**kwargs) + self._truncation = np.abs(self._array.sum() / self._array.size) + + +class AiryDisk2DKernel(Kernel2D): + """ + 2D Airy disk kernel. + + This kernel models the diffraction pattern of a circular aperture. This + kernel is normalized to a peak value of 1. + + Parameters + ---------- + radius : float + The radius of the Airy disk kernel (radius of the first zero). + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * radius. 
+ y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * radius. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import AiryDisk2DKernel + airydisk_2D_kernel = AiryDisk2DKernel(10) + plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, radius, **kwargs): + self._model = models.AiryDisk2D(1, 0, 0, radius) + self._default_size = _round_up_to_odd_integer(8 * radius) + super(AiryDisk2DKernel, self).__init__(**kwargs) + self.normalize() + self._truncation = None + + +class Moffat2DKernel(Kernel2D): + """ + 2D Moffat kernel. + + This kernel is a typical model for a seeing limited PSF. + + Parameters + ---------- + gamma : float + Core width of the Moffat model. + alpha : float + Power index of the Moffat model. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * radius. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * radius. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Moffat2DKernel + moffat_2D_kernel = Moffat2DKernel(3, 2) + plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, gamma, alpha, **kwargs): + self._model = models.Moffat2D((gamma - 1.0) / (np.pi * alpha * alpha), + 0, 0, gamma, alpha) + fwhm = 2.0 * alpha * (2.0 ** (1.0 / gamma) - 1.0) ** 0.5 + self._default_size = _round_up_to_odd_integer(4.0 * fwhm) + super(Moffat2DKernel, self).__init__(**kwargs) + self.normalize() + self._truncation = None + + +class Model1DKernel(Kernel1D): + """ + Create kernel from 1D model. + + The model has to be centered on x = 0. + + Parameters + ---------- + model : `~astropy.modeling.Fittable1DModel` + Kernel response function model + x_size : odd int, optional + Size in x direction of the kernel array. 
Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + Raises + ------ + TypeError + If model is not an instance of `~astropy.modeling.Fittable1DModel` + + See also + -------- + Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` + CustomKernel : Create kernel from list or array + + Examples + -------- + Define a Gaussian1D model: + + >>> from astropy.modeling.models import Gaussian1D + >>> from astropy.convolution.kernels import Model1DKernel + >>> gauss = Gaussian1D(1, 0, 2) + + And create a custom one dimensional kernel from it: + + >>> gauss_kernel = Model1DKernel(gauss, x_size=9) + + This kernel can now be used like a usual Astropy kernel. + """ + _separable = False + _is_bool = False + + def __init__(self, model, **kwargs): + if isinstance(model, Fittable1DModel): + self._model = model + else: + raise TypeError("Must be Fittable1DModel") + super(Model1DKernel, self).__init__(**kwargs) + + +class Model2DKernel(Kernel2D): + """ + Create kernel from 2D model. + + The model has to be centered on x = 0 and y = 0. + + Parameters + ---------- + model : `~astropy.modeling.Fittable2DModel` + Kernel response function model + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + Raises + ------ + TypeError + If model is not an instance of `~astropy.modeling.Fittable2DModel` + + See also + -------- + Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel` + CustomKernel : Create kernel from list or array + + Examples + -------- + Define a Gaussian2D model: + + >>> from astropy.modeling.models import Gaussian2D + >>> from astropy.convolution.kernels import Model2DKernel + >>> gauss = Gaussian2D(1, 0, 0, 2, 2) + + And create a custom two dimensional kernel from it: + + >>> gauss_kernel = Model2DKernel(gauss, x_size=9) + + This kernel can now be used like a usual astropy kernel. + + """ + _is_bool = False + _separable = False + + def __init__(self, model, **kwargs): + self._separable = False + if isinstance(model, Fittable2DModel): + self._model = model + else: + raise TypeError("Must be Fittable2DModel") + super(Model2DKernel, self).__init__(**kwargs) + + +class PSFKernel(Kernel2D): + """ + Initialize filter kernel from astropy PSF instance. + """ + _separable = False + + def __init__(self): + raise NotImplementedError('Not yet implemented') + + +class CustomKernel(Kernel): + """ + Create filter kernel from list or array. 
+ + Parameters + ---------- + array : list or array + Filter kernel array. Size must be odd. + + Raises + ------ + TypeError + If array is not a list or array. + KernelSizeError + If array size is even. + + See also + -------- + Model2DKernel, Model1DKernel + + Examples + -------- + Define one dimensional array: + + >>> from astropy.convolution.kernels import CustomKernel + >>> import numpy as np + >>> array = np.array([1, 2, 3, 2, 1]) + >>> kernel = CustomKernel(array) + >>> kernel.dimension + 1 + + Define two dimensional array: + + >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) + >>> kernel = CustomKernel(array) + >>> kernel.dimension + 2 + """ + def __init__(self, array): + self.array = array + super(CustomKernel, self).__init__(self._array) + + @property + def array(self): + """ + Filter kernel array. + """ + return self._array + + @array.setter + def array(self, array): + """ + Filter kernel array setter + """ + if isinstance(array, np.ndarray): + self._array = array.astype(np.float64) + elif isinstance(array, list): + self._array = np.array(array, dtype=np.float64) + else: + raise TypeError("Must be list or array.") + + # Check if array is odd in all axes + odd = all(axes_size % 2 != 0 for axes_size in self.shape) + if not odd: + raise KernelSizeError("Kernel size must be odd in all axes.") + + # Check if array is bool + ones = self._array == 1. + zeros = self._array == 0 + self._is_bool = bool(np.all(np.logical_or(ones, zeros))) + + self._truncation = 0.0 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67096d1bd1e5b31e5146abd97a27ab4c26e4be7e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/kernels.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..3cd9f7c3d928c2b9a57845c6438b77d8ca63de27 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f220a12e14dbb842636f9ecc8a97eed33a1c279d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2574c8aa52178075f3ddc6703c520f08cf0e468 Binary files /dev/null and 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.py new file mode 100644 index 0000000000000000000000000000000000000000..a0690d61089062765aa33f7a3135a4e90f03c219 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.py @@ -0,0 +1,809 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np +import numpy.ma as ma + +from ..convolve import convolve, convolve_fft + +from numpy.testing import assert_array_almost_equal_nulp, assert_array_almost_equal + +import itertools + +VALID_DTYPES = [] +for dtype_array in ['>f4', 'f8', 'f4', 'f8', 'f8'), [3, 3, 3]), 10) + elif boundary == 'extend': + assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.], + [72., 63., 54.], + [82., 75., 68.]], + [[93., 68., 43.], + [105., 78., 51.], + [117., 88., 59.]], + [[124., 85., 46.], + [138., 93., 48.], + [152., 101., 50.]]], + dtype='>f8')/kernsum, 10) + else: + raise ValueError("Invalid Boundary Option") + + +@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS) +def test_asymmetric_kernel(boundary): + ''' + Regression test for #6264: make sure that asymmetric convolution + functions go the right direction + ''' + + x = np.array([3., 0., 1.], dtype='>f8') + + y = np.array([1, 2, 3], dtype='>f8') + + z = convolve(x, y, boundary=boundary, normalize_kernel=False) + + if boundary == 'fill': + assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10) + elif boundary is None: + assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10) + elif boundary == 'extend': + assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10) + elif boundary == 'wrap': + assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10) + + +@pytest.mark.parametrize('ndims', (1, 2, 3)) +def test_convolution_consistency(ndims): + + np.random.seed(0) + array = np.random.randn(*([3]*ndims)) + np.random.seed(0) + kernel = np.random.rand(*([3]*ndims)) + + conv_f = convolve_fft(array, kernel, boundary='fill') + conv_d = convolve(array, kernel, boundary='fill') + + assert_array_almost_equal_nulp(conv_f, conv_d, 30) + + +def test_astropy_convolution_against_numpy(): + x = np.array([1, 2, 3]) + y = np.array([5, 4, 3, 2, 1]) + + assert_array_almost_equal(np.convolve(y, x, 'same'), + convolve(y, x, normalize_kernel=False)) + assert_array_almost_equal(np.convolve(y, x, 'same'), + convolve_fft(y, x, normalize_kernel=False)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_astropy_convolution_against_scipy(): + from scipy.signal import fftconvolve + x = np.array([1, 2, 3]) + y = np.array([5, 4, 3, 2, 1]) + + assert_array_almost_equal(fftconvolve(y, x, 'same'), + convolve(y, x, normalize_kernel=False)) + assert_array_almost_equal(fftconvolve(y, x, 'same'), + convolve_fft(y, x, normalize_kernel=False)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f182734f4836fda6d016e0524c8241cd613e06d7 Binary 
files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2a857e559cdc60a3c38eaa27ff280af7dcf9da --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.py @@ -0,0 +1,617 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_array_almost_equal_nulp + +from ..convolve import convolve_fft + + +VALID_DTYPES = [] +for dtype_array in ['>f4', 'f8', 'f4', 'f8', ' exception + # if nan_treatment and not normalize_kernel: + # with pytest.raises(ValueError): + # z = convolve_fft(x, y, boundary=boundary, + # nan_treatment=nan_treatment, + # normalize_kernel=normalize_kernel, + # ignore_edge_zeros=ignore_edge_zeros, + # ) + # return + + z = convolve_fft(x, y, boundary=boundary, + nan_treatment=nan_treatment, + fill_value=np.nan if normalize_kernel else 0, + normalize_kernel=normalize_kernel, + preserve_nan=preserve_nan) + + if preserve_nan: + assert np.isnan(z[1, 1]) + + # weights + w_n = np.array([[3., 5., 3.], + [5., 8., 5.], + [3., 5., 3.]], dtype='float64') + w_z = np.array([[4., 6., 4.], + [6., 9., 6.], + [4., 6., 4.]], dtype='float64') + answer_dict = { + 'sum': np.array([[1., 4., 3.], + [3., 6., 5.], + [3., 3., 2.]], dtype='float64'), + 'sum_wrap': np.array([[6., 6., 6.], + [6., 6., 6.], + [6., 6., 6.]], dtype='float64'), + } + answer_dict['average'] = answer_dict['sum'] / w_z + answer_dict['average_interpnan'] = answer_dict['sum'] / w_n + answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8. + answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9. + answer_dict['average_withzeros'] = answer_dict['sum'] / 9. + answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8. + answer_dict['sum_withzeros'] = answer_dict['sum'] + answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8. + answer_dict['sum_withzeros_interpnan'] = answer_dict['sum'] + answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8. 
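+        # The expected answer is looked up by assembling a key from the test
+        # options: 'average' when the kernel is normalized, otherwise 'sum';
+        # then '_wrap' for wrap boundaries (otherwise '_withzeros' when NaNs
+        # are filled with zeros), and '_interpnan' when NaNs are interpolated
+        # over. For example, normalize_kernel=True with boundary='wrap' and
+        # nan_treatment='interpolate' selects 'average_wrap_interpnan'.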
+ + if normalize_kernel: + answer_key = 'average' + else: + answer_key = 'sum' + + if boundary == 'wrap': + answer_key += '_wrap' + elif nan_treatment == 'fill': + answer_key += '_withzeros' + + if nan_treatment == 'interpolate': + answer_key += '_interpnan' + + a = answer_dict[answer_key] + + # Skip the NaN at [1, 1] when preserve_nan=True + posns = np.where(np.isfinite(z)) + + # for reasons unknown, the Windows FFT returns an answer for the [0, 0] + # component that is EXACTLY 10*np.spacing + assert_floatclose(z[posns], z[posns]) + + def test_big_fail(self): + """ Test that convolve_fft raises an exception if a too-large array is passed in """ + + with pytest.raises((ValueError, MemoryError)): + # while a good idea, this approach did not work; it actually writes to disk + # arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=np.complex) + # this just allocates the memory but never touches it; it's better: + arr = np.empty([512, 512, 512], dtype=np.complex) + # note 512**3 * 16 bytes = 2.0 GB + convolve_fft(arr, arr) + + @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS) + def test_non_normalized_kernel(self, boundary): + + x = np.array([[0., 0., 4.], + [1., 2., 0.], + [0., 3., 0.]], dtype='float') + + y = np.array([[1., -1., 1.], + [-1., 0., -1.], + [1., -1., 1.]], dtype='float') + + z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill', + normalize_kernel=False) + + if boundary in (None, 'fill'): + assert_floatclose(z, np.array([[1., -5., 2.], + [1., 0., -3.], + [-2., -1., -1.]], dtype='float')) + elif boundary == 'wrap': + assert_floatclose(z, np.array([[0., -8., 6.], + [5., 0., -4.], + [2., 3., -4.]], dtype='float')) + else: + raise ValueError("Invalid boundary specification") + +@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS) +def test_asymmetric_kernel(boundary): + ''' + Make sure that asymmetric convolution + functions go the right direction + ''' + + x = np.array([3., 0., 1.], dtype='>f8') + + y = np.array([1, 2, 3], dtype='>f8') + + z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False) + + if boundary in (None, 'fill'): + assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10) + elif boundary == 'wrap': + assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2974c035bf437b69152e44dd8b9b2723918eef83 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_fft.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..fc237947baafd82058ac2b0dde6f64a78a0bc5f2 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.py @@ -0,0 +1,130 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_almost_equal + +from ..convolve import convolve, convolve_fft 
+from ..kernels import Gaussian2DKernel, Box2DKernel, Tophat2DKernel +from ..kernels import Moffat2DKernel + + +SHAPES_ODD = [[15, 15], [31, 31]] +SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] +WIDTHS = [2, 3, 4, 5] + +KERNELS = [] + +for shape in SHAPES_ODD: + for width in WIDTHS: + + KERNELS.append(Gaussian2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + KERNELS.append(Box2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + KERNELS.append(Tophat2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + KERNELS.append(Moffat2DKernel(width, 2, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + +class Test2DConvolutions(object): + + @pytest.mark.parametrize('kernel', KERNELS) + def test_centered_makekernel(self, kernel): + """ + Test smoothing of an image with a single positive pixel + """ + + shape = kernel.array.shape + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize('kernel', KERNELS) + def test_random_makekernel(self, kernel): + """ + Test smoothing of an image made of random noise + """ + + shape = kernel.array.shape + + x = np.random.randn(*shape) + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + # not clear why, but these differ by a couple ulps... + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS))) + def test_uniform_smallkernel(self, shape, width): + """ + Test smoothing of an image with a single positive pixel + + Uses a simple, small kernel + """ + + if width % 2 == 0: + # convolve does not accept odd-shape kernels + return + + kernel = np.ones([width, width]) + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5]))) + def test_smallkernel_Box2DKernel(self, shape, width): + """ + Test smoothing of an image with a single positive pixel + + Compares a small uniform kernel to the Box2DKernel + """ + + kernel1 = np.ones([width, width]) / np.float(width) ** 2 + kernel2 = Box2DKernel(width, mode='oversample', factor=10) + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel2, boundary='fill') + c1 = convolve_fft(x, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + c2 = convolve(x, kernel2, boundary='fill') + c1 = convolve(x, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc8bd642bbf38abaa09180bae7d7e4a56f4b5af6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.pyc differ diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.py new file mode 100644 index 0000000000000000000000000000000000000000..2b54d30bd04f9e9323d670026dd4673cef953a16 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.py @@ -0,0 +1,107 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math +import numpy as np +import pytest + +from ..convolve import convolve, convolve_fft, convolve_models +from ...modeling import models, fitting +from ...utils.misc import NumpyRNGContext +from numpy.testing import assert_allclose, assert_almost_equal + +try: + import scipy +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +class TestConvolve1DModels(object): + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_is_consistency_with_astropy_convolution(self, mode): + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode) + x = np.arange(-5, 6) + ans = eval("{}(model(x), kernel(x))".format(mode)) + + assert_allclose(ans, model_conv(x), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_against_scipy(self, mode): + from scipy.signal import fftconvolve + + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode) + x = np.arange(-5, 6) + ans = fftconvolve(kernel(x), model(x), mode='same') + + assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_against_scipy_with_additional_keywords(self, mode): + from scipy.signal import fftconvolve + + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode, + normalize_kernel=False) + x = np.arange(-5, 6) + ans = fftconvolve(kernel(x), model(x), mode='same') + + assert_allclose(ans, model_conv(x), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + def test_sum_of_gaussians(self, mode): + """ + Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d), + where N(., .) stands for Gaussian probability density function, + in which a and c are their means and b and d are their variances. 
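+        Concretely, the case below convolves N(3, 1) with N(1, 1) and
+        compares the result against N(4, 2), i.e. a Gaussian with mean 4
+        and standard deviation sqrt(2).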
+ """ + + kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1) + model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1) + model_conv = convolve_models(model, kernel, mode=mode, + normalize_kernel=False) + ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2)) + x = np.arange(-5, 6) + + assert_allclose(ans(x), model_conv(x), atol=1e-3) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + def test_convolve_box_models(self, mode): + kernel = models.Box1D() + model = models.Box1D() + model_conv = convolve_models(model, kernel, mode=mode) + x = np.linspace(-1, 1, 99) + ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0) + + assert_allclose(ans, model_conv(x), atol=1e-3) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_fitting_convolve_models(self, mode): + """ + test that a convolve model can be fitted + """ + b1 = models.Box1D() + g1 = models.Gaussian1D() + + x = np.linspace(-5, 5, 99) + fake_model = models.Gaussian1D(amplitude=10) + with NumpyRNGContext(123): + fake_data = fake_model(x) + np.random.normal(size=len(x)) + + init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False) + fitter = fitting.LevMarLSQFitter() + fitted_model = fitter(init_model, x, fake_data) + + me = np.mean(fitted_model(x) - fake_data) + assert_almost_equal(me, 0.0, decimal=2) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35263281c4bb8f8de4c6d24edbfc2e61e739c3ec Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_models.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.py new file mode 100644 index 0000000000000000000000000000000000000000..93e801be2af161dceb0d1582c76e79880124c525 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.py @@ -0,0 +1,58 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ..convolve import convolve, convolve_fft +from ..kernels import Gaussian2DKernel +from ...nddata import NDData + + +def test_basic_nddata(): + arr = np.zeros((11, 11)) + arr[5, 5] = 1 + ndd = NDData(arr) + test_kernel = Gaussian2DKernel(1) + + result = convolve(ndd, test_kernel) + + x, y = np.mgrid[:11, :11] + expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2)) + + np.testing.assert_allclose(result, expected, atol=1e-6) + + resultf = convolve_fft(ndd, test_kernel) + np.testing.assert_allclose(resultf, expected, atol=1e-6) + + +@pytest.mark.parametrize('convfunc', + [lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True), + lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)]) +def test_masked_nddata(convfunc): + arr = np.zeros((11, 11)) + arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2 + arr[5, 5] = 1.5 + ndd_base = NDData(arr) + + mask = arr < 0 # this is all False + mask[5, 5] = True + ndd_mask = NDData(arr, mask=mask) + + arrnan 
= arr.copy() + arrnan[5, 5] = np.nan + ndd_nan = NDData(arrnan) + + test_kernel = Gaussian2DKernel(1) + + result_base = convfunc(ndd_base, test_kernel) + result_nan = convfunc(ndd_nan, test_kernel) + result_mask = convfunc(ndd_mask, test_kernel) + + assert np.allclose(result_nan, result_mask) + assert not np.allclose(result_base, result_mask) + assert not np.allclose(result_base, result_nan) + + # check to make sure the mask run doesn't talk back to the initial array + assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e74c27c719b18c6b2a9c626308388acbe8f4ff7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_nddata.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae39741641c5cf4c379e33a7d19e9d472a2726c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.py @@ -0,0 +1,187 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import timeit + +import numpy as np # pylint: disable=W0611 + +from ...extern.six.moves import range, zip + +# largest image size to use for "linear" and fft convolutions +max_exponents_linear = {1: 15, 2: 7, 3: 5} +max_exponents_fft = {1: 15, 2: 10, 3: 7} + +if __name__ == "__main__": + for ndims in [1, 2, 3]: + print("\n{}-dimensional arrays ('n' is the size of the image AND " + "the kernel)".format(ndims)) + print(" ".join(["%17s" % n for n in ("n", "convolve", "convolve_fft")])) + + for ii in range(3, max_exponents_fft[ndims]): + # array = np.random.random([2**ii]*ndims) + # test ODD sizes too + if ii < max_exponents_fft[ndims]: + setup = (""" +import numpy as np +from astropy.convolution.convolve import convolve +from astropy.convolution.convolve import convolve_fft +array = np.random.random([%i]*%i) +kernel = np.random.random([%i]*%i)""") % (2 ** ii - 1, ndims, 2 ** ii - 1, ndims) + + print("%16i:" % (int(2 ** ii - 1)), end=' ') + + if ii <= max_exponents_linear[ndims]: + for ffttype, extra in zip(("", "_fft"), + ("", "fft_pad=False")): + statement = "convolve{}(array, kernel, boundary='fill', {})".format(ffttype, extra) + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + else: + print("%17s" % "skipped", end=' ') + statement = "convolve_fft(array, kernel, boundary='fill')" + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + + print() + + setup = (""" +import numpy as np +from astropy.convolution.convolve import convolve +from astropy.convolution.convolve import convolve_fft +array = np.random.random([%i]*%i) +kernel = np.random.random([%i]*%i)""") % (2 ** ii - 1, ndims, 2 ** ii - 1, ndims) + + print("%16i:" % (int(2 ** ii)), end=' ') + + if ii <= max_exponents_linear[ndims]: + for ffttype in ("", "_fft"): + statement = "convolve{}(array, 
kernel, boundary='fill')".format(ffttype) + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + else: + print("%17s" % "skipped", end=' ') + statement = "convolve_fft(array, kernel, boundary='fill')" + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + + print() + +""" +Unfortunately, these tests are pretty strongly inconclusive + +RESULTS on a 2011 Mac Air: +1-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000408 0.002334 0.005571 0.002677 + 8: 0.000399 0.002818 0.006505 0.003094 + 15: 0.000361 0.002491 0.005648 0.002678 + 16: 0.000371 0.002997 0.005983 0.003036 + 31: 0.000535 0.002450 0.005988 0.002880 + 32: 0.000452 0.002618 0.007102 0.004366 + 63: 0.000509 0.002876 0.008003 0.002981 + 64: 0.000453 0.002706 0.005520 0.003049 + 127: 0.000801 0.004080 0.008513 0.003932 + 128: 0.000749 0.003332 0.006236 0.003159 + 255: 0.002453 0.003111 0.007518 0.003564 + 256: 0.002478 0.003341 0.006325 0.004290 + 511: 0.008394 0.006224 0.010247 0.005991 + 512: 0.007934 0.003764 0.006840 0.004106 + 1023: 0.028741 0.007538 0.009591 0.007696 + 1024: 0.027900 0.004871 0.009628 0.005118 + 2047: 0.106323 0.021575 0.022041 0.020682 + 2048: 0.108916 0.008107 0.011049 0.007596 + 4095: 0.411936 0.021675 0.019761 0.020939 + 4096: 0.408992 0.018870 0.016663 0.012890 + 8191: 1.664517 8.278320 0.073001 7.803563 + 8192: 1.657573 0.037967 0.034227 0.028390 + 16383: 6.654678 0.251661 0.202271 0.222171 + 16384: 6.611977 0.073630 0.067616 0.055591 + +2-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000552 0.003524 0.006667 0.004318 + 8: 0.000646 0.004443 0.007354 0.003958 + 15: 0.002986 0.005093 0.012941 0.005951 + 16: 0.003549 0.005688 0.008818 0.006300 + 31: 0.074360 0.033973 0.031800 0.036937 + 32: 0.077338 0.017708 0.025637 0.011883 + 63: 0.848471 0.057407 0.052192 0.053213 + 64: 0.773061 0.029657 0.033409 0.028230 + 127: 14.656414 1.005329 0.402113 0.955279 + 128: 15.867796 0.266233 0.268551 0.237930 + 255: skipped 1.715546 1.566876 1.745338 + 256: skipped 1.515616 1.268220 1.036881 + 511: skipped 4.066155 4.303350 3.930661 + 512: skipped 3.976139 4.337525 3.968935 + +3-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.009239 0.012957 0.011957 0.015997 + 8: 0.012405 0.011328 0.011677 0.012283 + 15: 0.772434 0.075621 0.056711 0.079508 + 16: 0.964635 0.105846 0.072811 0.104611 + 31: 62.824051 2.295193 1.189505 2.351136 + 32: 79.507060 1.169182 0.821779 1.275770 + 63: skipped 11.250225 10.982726 10.585744 + 64: skipped 10.013558 11.507645 12.665557 + + + +On a 2009 Mac Pro: +1-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000360 0.002269 0.004986 0.002476 + 8: 0.000361 0.002468 0.005242 0.002696 + 15: 0.000364 0.002255 0.005244 0.002471 + 16: 0.000365 0.002506 0.005286 0.002727 + 31: 0.000385 0.002380 0.005422 0.002588 + 32: 0.000385 0.002531 0.005543 0.002737 + 63: 0.000474 0.002407 0.005392 0.002637 + 64: 0.000484 0.002602 0.005631 0.002823 + 127: 0.000752 0.004122 0.007827 0.003966 + 128: 0.000757 0.002763 0.005844 0.002958 + 255: 0.004316 0.003258 0.006566 0.003324 + 256: 0.004354 0.003180 0.006120 0.003245 + 511: 0.011517 0.007158 0.009898 0.006238 + 512: 0.011482 0.003873 
0.006777 0.003820 + 1023: 0.034105 0.009211 0.009468 0.008260 + 1024: 0.034609 0.005504 0.008399 0.005080 + 2047: 0.113620 0.028097 0.020662 0.021603 + 2048: 0.112828 0.008403 0.010939 0.007331 + 4095: 0.403373 0.023211 0.018767 0.020065 + 4096: 0.403316 0.017550 0.017853 0.013651 + 8191: 1.519329 8.454573 0.211436 7.212381 + 8192: 1.519082 0.033148 0.030370 0.025905 + 16383: 5.887481 0.317428 0.153344 0.237119 + 16384: 5.888222 0.069379 0.065264 0.052847 + +2-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000474 0.003470 0.006131 0.003503 + 8: 0.000503 0.003565 0.006400 0.003586 + 15: 0.002011 0.004481 0.007825 0.004496 + 16: 0.002236 0.004744 0.007078 0.004680 + 31: 0.027291 0.019433 0.014841 0.018034 + 32: 0.029283 0.009244 0.010161 0.008964 + 63: 0.445680 0.038171 0.026753 0.037404 + 64: 0.460616 0.028128 0.029487 0.029149 + 127: 7.003774 0.925921 0.282591 0.762671 + 128: 7.063657 0.110838 0.104402 0.133523 + 255: skipped 0.804682 0.708849 0.869368 + 256: skipped 0.797800 0.721042 0.880848 + 511: skipped 3.643626 3.687562 4.584770 + 512: skipped 3.715215 4.893539 5.538462 + +3-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.004520 0.011519 0.009464 0.012335 + 8: 0.006422 0.010294 0.010220 0.011711 + 15: 0.329566 0.060978 0.045495 0.073692 + 16: 0.405275 0.069999 0.040659 0.086114 + 31: 24.935228 1.654920 0.710509 1.773879 + 32: 27.524226 0.724053 0.543507 1.027568 + 63: skipped 8.982771 12.407683 16.900078 + 64: skipped 8.956070 11.934627 17.296447 + +""" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f8f648a9c428a898ac5353cfd2375469b50fc7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_speeds.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.py new file mode 100644 index 0000000000000000000000000000000000000000..388c0885d131dcd3ec5ab4bae52c24c694688779 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.py @@ -0,0 +1,198 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_allclose + +from ..utils import discretize_model +from ...modeling.functional_models import ( + Gaussian1D, Box1D, MexicanHat1D, Gaussian2D, Box2D, MexicanHat2D) +from ...modeling.tests.example_models import models_1D, models_2D +from ...modeling.tests.test_models import create_model + +try: + import scipy # pylint: disable=W0611 + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + + +modes = ['center', 'linear_interp', 'oversample'] +test_models_1D = [Gaussian1D, Box1D, MexicanHat1D] +test_models_2D = [Gaussian2D, Box2D, MexicanHat2D] + + +@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes))) +def test_pixel_sum_1D(model_class, mode): + """ + Test if the sum of all 
pixels corresponds nearly to the integral. + """ + if model_class == Box1D and mode == "center": + pytest.skip("Non integrating mode. Skip integral test.") + parameters = models_1D[model_class] + model = create_model(model_class, parameters) + + values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode) + assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001) + + +@pytest.mark.parametrize('mode', modes) +def test_gaussian_eval_1D(mode): + """ + Discretize Gaussian with different modes and check + if result is at least similar to Gaussian1D.eval(). + """ + model = Gaussian1D(1, 0, 20) + x = np.arange(-100, 101) + values = model(x) + disc_values = discretize_model(model, (-100, 101), mode=mode) + assert_allclose(values, disc_values, atol=0.001) + + +@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes))) +def test_pixel_sum_2D(model_class, mode): + """ + Test if the sum of all pixels corresponds nearly to the integral. + """ + if model_class == Box2D and mode == "center": + pytest.skip("Non integrating mode. Skip integral test.") + + parameters = models_2D[model_class] + model = create_model(model_class, parameters) + + values = discretize_model(model, models_2D[model_class]['x_lim'], + models_2D[model_class]['y_lim'], mode=mode) + assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001) + + +@pytest.mark.parametrize('mode', modes) +def test_gaussian_eval_2D(mode): + """ + Discretize Gaussian with different modes and check + if result is at least similar to Gaussian2D.eval() + """ + model = Gaussian2D(0.01, 0, 0, 1, 1) + + x = np.arange(-2, 3) + y = np.arange(-2, 3) + + x, y = np.meshgrid(x, y) + + values = model(x, y) + disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode) + assert_allclose(values, disc_values, atol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_gaussian_eval_2D_integrate_mode(): + """ + Discretize Gaussian with integrate mode + """ + model_list = [Gaussian2D(.01, 0, 0, 2, 2), + Gaussian2D(.01, 0, 0, 1, 2), + Gaussian2D(.01, 0, 0, 2, 1)] + + x = np.arange(-2, 3) + y = np.arange(-2, 3) + + x, y = np.meshgrid(x, y) + + for model in model_list: + values = model(x, y) + disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate') + assert_allclose(values, disc_values, atol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_subpixel_gauss_1D(): + """ + Test subpixel accuracy of the integrate mode with gaussian 1D model. + """ + gauss_1D = Gaussian1D(1, 0, 0.1) + values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100) + assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_subpixel_gauss_2D(): + """ + Test subpixel accuracy of the integrate mode with gaussian 2D model. + """ + gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1) + values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100) + assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001) + + +def test_discretize_callable_1d(): + """ + Test discretize when a 1d function is passed. + """ + def f(x): + return x ** 2 + y = discretize_model(f, (-5, 6)) + assert_allclose(y, np.arange(-5, 6) ** 2) + + +def test_discretize_callable_2d(): + """ + Test discretize when a 2d function is passed. 
+ """ + def f(x, y): + return x ** 2 + y ** 2 + actual = discretize_model(f, (-5, 6), (-5, 6)) + y, x = (np.indices((11, 11)) - 5) + desired = x ** 2 + y ** 2 + assert_allclose(actual, desired) + + +def test_type_exception(): + """ + Test type exception. + """ + with pytest.raises(TypeError) as exc: + discretize_model(float(0), (-10, 11)) + assert exc.value.args[0] == 'Model must be callable.' + + +def test_dim_exception_1d(): + """ + Test dimension exception 1d. + """ + def f(x): + return x ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11), (-10, 11)) + assert exc.value.args[0] == "y range specified, but model is only 1-d." + + +def test_dim_exception_2d(): + """ + Test dimension exception 2d. + """ + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11)) + assert exc.value.args[0] == "y range not specified, but model is 2-d" + + +def test_float_x_range_exception(): + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10.002, 11.23)) + assert exc.value.args[0] == ("The difference between the upper an lower" + " limit of 'x_range' must be a whole number.") + + +def test_float_y_range_exception(): + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11), (-10.002, 11.23)) + assert exc.value.args[0] == ("The difference between the upper an lower" + " limit of 'y_range' must be a whole number.") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92200efa04bfd6c9e92718dfa600cf78890c68a4 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_discretize.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.py new file mode 100644 index 0000000000000000000000000000000000000000..592923eaffb8dea1b7671084d40cfef04d553b21 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.py @@ -0,0 +1,522 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_almost_equal, assert_allclose + +from ..convolve import convolve, convolve_fft +from ..kernels import ( + Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel, + Trapezoid1DKernel, TrapezoidDisk2DKernel, MexicanHat1DKernel, + Tophat2DKernel, MexicanHat2DKernel, AiryDisk2DKernel, Ring2DKernel, + CustomKernel, Model1DKernel, Model2DKernel, Kernel1D, Kernel2D) + +from ..utils import KernelSizeError +from ...modeling.models import Box2D, Gaussian1D, Gaussian2D +from ...utils.exceptions import AstropyWarning, AstropyUserWarning + +try: + from scipy.ndimage import filters + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + +WIDTHS_ODD = [3, 5, 7, 9] +WIDTHS_EVEN = [2, 4, 8, 16] +MODES = ['center', 'linear_interp', 'oversample', 'integrate'] +KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel, + Box1DKernel, Box2DKernel, + Trapezoid1DKernel, 
TrapezoidDisk2DKernel, + MexicanHat1DKernel, Tophat2DKernel, AiryDisk2DKernel, Ring2DKernel] + + +NUMS = [1, 1., np.float(1.), np.float32(1.), np.float64(1.)] + + +# Test data +delta_pulse_1D = np.zeros(81) +delta_pulse_1D[40] = 1 + +delta_pulse_2D = np.zeros((81, 81)) +delta_pulse_2D[40, 40] = 1 + +random_data_1D = np.random.rand(61) +random_data_2D = np.random.rand(61, 61) + + +class TestKernels(object): + """ + Test class for the built-in convolution kernels. + """ + + @pytest.mark.skipif('not HAS_SCIPY') + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_scipy_filter_gaussian(self, width): + """ + Test GaussianKernel against SciPy ndimage gaussian filter. + """ + gauss_kernel_1D = Gaussian1DKernel(width) + gauss_kernel_1D.normalize() + gauss_kernel_2D = Gaussian2DKernel(width) + gauss_kernel_2D.normalize() + + astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill') + astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill') + + scipy_1D = filters.gaussian_filter(delta_pulse_1D, width) + scipy_2D = filters.gaussian_filter(delta_pulse_2D, width) + + assert_almost_equal(astropy_1D, scipy_1D, decimal=12) + assert_almost_equal(astropy_2D, scipy_2D, decimal=12) + + @pytest.mark.skipif('not HAS_SCIPY') + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_scipy_filter_gaussian_laplace(self, width): + """ + Test MexicanHat kernels against SciPy ndimage gaussian laplace filters. + """ + mexican_kernel_1D = MexicanHat1DKernel(width) + mexican_kernel_2D = MexicanHat2DKernel(width) + + astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=False) + astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=False) + + with pytest.raises(Exception) as exc: + astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=True) + assert 'sum is close to zero' in exc.value.args[0] + + with pytest.raises(Exception) as exc: + astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=True) + assert 'sum is close to zero' in exc.value.args[0] + + # The Laplace of Gaussian filter is an inverted Mexican Hat + # filter. + scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width) + scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width) + + # There is a slight deviation in the normalization. They differ by a + # factor of ~1.0000284132604045. The reason is not known. 
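+        # Because of that small normalization offset, the comparison is made
+        # to 5 decimal places here rather than the 12 used elsewhere in this
+        # module.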
+ assert_almost_equal(astropy_1D, scipy_1D, decimal=5) + assert_almost_equal(astropy_2D, scipy_2D, decimal=5) + + @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))) + def test_delta_data(self, kernel_type, width): + """ + Test smoothing of an image with a single positive pixel + """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(width) + else: + kernel = kernel_type(width, width * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))) + def test_random_data(self, kernel_type, width): + """ + Test smoothing of an image made of random noise + """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(width) + else: + kernel = kernel_type(width, width * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_uniform_smallkernel(self, width): + """ + Test smoothing of an image with a single positive pixel + + Instead of using kernel class, uses a simple, small kernel + """ + kernel = np.ones([width, width]) + + c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill') + c1 = convolve(delta_pulse_2D, kernel, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_smallkernel_vs_Box2DKernel(self, width): + """ + Test smoothing of an image with a single positive pixel + """ + kernel1 = np.ones([width, width]) / width ** 2 + kernel2 = Box2DKernel(width) + + c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill') + c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + def test_convolve_1D_kernels(self): + """ + Check if convolving two kernels with each other works correctly. + """ + gauss_1 = Gaussian1DKernel(3) + gauss_2 = Gaussian1DKernel(4) + test_gauss_3 = Gaussian1DKernel(5) + + gauss_3 = convolve(gauss_1, gauss_2) + assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) + + def test_convolve_2D_kernels(self): + """ + Check if convolving two kernels with each other works correctly. 
+ """ + gauss_1 = Gaussian2DKernel(3) + gauss_2 = Gaussian2DKernel(4) + test_gauss_3 = Gaussian2DKernel(5) + + gauss_3 = convolve(gauss_1, gauss_2) + assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) + + @pytest.mark.parametrize(('number'), NUMS) + def test_multiply_scalar(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = number * gauss + assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12) + + @pytest.mark.parametrize(('number'), NUMS) + def test_multiply_scalar_type(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = number * gauss + assert type(gauss_new) is Gaussian1DKernel + + @pytest.mark.parametrize(('number'), NUMS) + def test_rmultiply_scalar_type(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = gauss * number + assert type(gauss_new) is Gaussian1DKernel + + def test_multiply_kernel1d(self): + """Test that multiplying two 1D kernels raises an exception.""" + gauss = Gaussian1DKernel(3) + with pytest.raises(Exception): + gauss * gauss + + def test_multiply_kernel2d(self): + """Test that multiplying two 2D kernels raises an exception.""" + gauss = Gaussian2DKernel(3) + with pytest.raises(Exception): + gauss * gauss + + def test_multiply_kernel1d_kernel2d(self): + """ + Test that multiplying a 1D kernel with a 2D kernel raises an + exception. + """ + with pytest.raises(Exception): + Gaussian1DKernel(3) * Gaussian2DKernel(3) + + def test_add_kernel_scalar(self): + """Test that adding a scalar to a kernel raises an exception.""" + with pytest.raises(Exception): + Gaussian1DKernel(3) + 1 + + def test_model_1D_kernel(self): + """ + Check Model1DKernel against Gaussian1Dkernel + """ + stddev = 5. + gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev) + model_gauss_kernel = Model1DKernel(gauss, x_size=21) + gauss_kernel = Gaussian1DKernel(stddev, x_size=21) + assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, + decimal=12) + + def test_model_2D_kernel(self): + """ + Check Model2DKernel against Gaussian2Dkernel + """ + stddev = 5. + gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev) + model_gauss_kernel = Model2DKernel(gauss, x_size=21) + gauss_kernel = Gaussian2DKernel(stddev, x_size=21) + assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, + decimal=12) + + def test_custom_1D_kernel(self): + """ + Check CustomKernel against Box1DKernel. + """ + # Define one dimensional array: + array = np.ones(5) + custom = CustomKernel(array) + custom.normalize() + box = Box1DKernel(5) + + c2 = convolve(delta_pulse_1D, custom, boundary='fill') + c1 = convolve(delta_pulse_1D, box, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + def test_custom_2D_kernel(self): + """ + Check CustomKernel against Box2DKernel. + """ + # Define one dimensional array: + array = np.ones((5, 5)) + custom = CustomKernel(array) + custom.normalize() + box = Box2DKernel(5) + + c2 = convolve(delta_pulse_2D, custom, boundary='fill') + c1 = convolve(delta_pulse_2D, box, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + def test_custom_1D_kernel_list(self): + """ + Check if CustomKernel works with lists. + """ + custom = CustomKernel([1, 1, 1, 1, 1]) + assert custom.is_bool is True + + def test_custom_2D_kernel_list(self): + """ + Check if CustomKernel works with lists. 
+ """ + custom = CustomKernel([[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + assert custom.is_bool is True + + def test_custom_1D_kernel_zerosum(self): + """ + Check if CustomKernel works when the input array/list + sums to zero. + """ + array = [-2, -1, 0, 1, 2] + custom = CustomKernel(array) + custom.normalize() + assert custom.truncation == 0. + assert custom._kernel_sum == 0. + + def test_custom_2D_kernel_zerosum(self): + """ + Check if CustomKernel works when the input array/list + sums to zero. + """ + array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]] + custom = CustomKernel(array) + custom.normalize() + assert custom.truncation == 0. + assert custom._kernel_sum == 0. + + def test_custom_kernel_odd_error(self): + """ + Check if CustomKernel raises if the array size is odd. + """ + with pytest.raises(KernelSizeError): + CustomKernel([1, 1, 1, 1]) + + def test_add_1D_kernels(self): + """ + Check if adding of two 1D kernels works. + """ + box_1 = Box1DKernel(5) + box_2 = Box1DKernel(3) + box_3 = Box1DKernel(1) + box_sum_1 = box_1 + box_2 + box_3 + box_sum_2 = box_2 + box_3 + box_1 + box_sum_3 = box_3 + box_1 + box_2 + ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.] + assert_almost_equal(box_sum_1.array, ref, decimal=12) + assert_almost_equal(box_sum_2.array, ref, decimal=12) + assert_almost_equal(box_sum_3.array, ref, decimal=12) + + # Assert that the kernels haven't changed + assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12) + assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12) + assert_almost_equal(box_3.array, [1], decimal=12) + + def test_add_2D_kernels(self): + """ + Check if adding of two 1D kernels works. + """ + box_1 = Box2DKernel(3) + box_2 = Box2DKernel(1) + box_sum_1 = box_1 + box_2 + box_sum_2 = box_2 + box_1 + ref = [[1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 + 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.]] + ref_1 = [[1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.]] + assert_almost_equal(box_2.array, [[1]], decimal=12) + assert_almost_equal(box_1.array, ref_1, decimal=12) + assert_almost_equal(box_sum_1.array, ref, decimal=12) + assert_almost_equal(box_sum_2.array, ref, decimal=12) + + def test_Gaussian1DKernel_even_size(self): + """ + Check if even size for GaussianKernel works. + """ + gauss = Gaussian1DKernel(3, x_size=10) + assert gauss.array.size == 10 + + def test_Gaussian2DKernel_even_size(self): + """ + Check if even size for GaussianKernel works. + """ + gauss = Gaussian2DKernel(3, x_size=10, y_size=10) + assert gauss.array.shape == (10, 10) + + def test_normalize_peak(self): + """ + Check if normalize works with peak mode. + """ + custom = CustomKernel([1, 2, 3, 2, 1]) + custom.normalize(mode='peak') + assert custom.array.max() == 1 + + def test_check_kernel_attributes(self): + """ + Check if kernel attributes are correct. + """ + box = Box2DKernel(5) + + # Check truncation + assert box.truncation == 0 + + # Check model + assert isinstance(box.model, Box2D) + + # Check center + assert box.center == [2, 2] + + # Check normalization + box.normalize() + assert_almost_equal(box._kernel_sum, 1., decimal=12) + + # Check separability + assert box.separable + + @pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES))) + def test_discretize_modes(self, kernel_type, mode): + """ + Check if the different modes result in kernels that work with convolve. + Use only small kernel width, to make the test pass quickly. 
+ """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(3) + else: + kernel = kernel_type(3, 3 * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_EVEN) + def test_box_kernels_even_size(self, width): + """ + Check if BoxKernel work properly with even sizes. + """ + kernel_1D = Box1DKernel(width) + assert kernel_1D.shape[0] % 2 != 0 + assert kernel_1D.array.sum() == 1. + + kernel_2D = Box2DKernel(width) + assert np.all([_ % 2 != 0 for _ in kernel_2D.shape]) + assert kernel_2D.array.sum() == 1. + + def test_kernel_normalization(self): + """ + Test that repeated normalizations do not change the kernel [#3747]. + """ + + kernel = CustomKernel(np.ones(5)) + kernel.normalize() + data = np.copy(kernel.array) + + kernel.normalize() + assert_allclose(data, kernel.array) + + kernel.normalize() + assert_allclose(data, kernel.array) + + def test_kernel_normalization_mode(self): + """ + Test that an error is raised if mode is invalid. + """ + with pytest.raises(ValueError): + kernel = CustomKernel(np.ones(3)) + kernel.normalize(mode='invalid') + + def test_kernel1d_int_size(self): + """ + Test that an error is raised if ``Kernel1D`` ``x_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian1DKernel(3, x_size=1.2) + + def test_kernel2d_int_xsize(self): + """ + Test that an error is raised if ``Kernel2D`` ``x_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian2DKernel(3, x_size=1.2) + + def test_kernel2d_int_ysize(self): + """ + Test that an error is raised if ``Kernel2D`` ``y_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian2DKernel(3, x_size=5, y_size=1.2) + + def test_kernel1d_initialization(self): + """ + Test that an error is raised if an array or model is not + specified for ``Kernel1D``. + """ + with pytest.raises(TypeError): + Kernel1D() + + def test_kernel2d_initialization(self): + """ + Test that an error is raised if an array or model is not + specified for ``Kernel2D``. 
+ """ + with pytest.raises(TypeError): + Kernel2D() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a90342a309323a3c9e4671489b9d3c5a952edce Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_kernel_class.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..15a214b23b94db650059f34ddcd0afa8d87768bc --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.py @@ -0,0 +1,27 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function, unicode_literals + +import pytest +import numpy as np + +from ... import convolution as conv +from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa + + +@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"), + [(conv.CustomKernel, [], + {'array': np.random.rand(15)}, + False), + (conv.Gaussian1DKernel, [1.0], + {'x_size': 5}, + True), + (conv.Gaussian2DKernel, [1.0], + {'x_size': 5, 'y_size': 5}, + True), + ]) +def test_simple_object(pickle_protocol, name, args, kwargs, xfail): + # Tests easily instantiated objects + if xfail: + pytest.xfail() + original = name(*args, **kwargs) + check_pickling_recovery(original, pickle_protocol) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d8162ca917022c28e05af19193c3d49059b162b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/tests/test_pickle.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..38f229ccc36c3389f9cea8b7c3d4843d1ffb99a6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.py @@ -0,0 +1,301 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from ..modeling.core import FittableModel, custom_model +from ..extern.six.moves import range + +__all__ = ['discretize_model'] + + +class DiscretizationError(Exception): + """ + Called when discretization of models goes wrong. + """ + + +class KernelSizeError(Exception): + """ + Called when size of kernels is even. + """ + + +def add_kernel_arrays_1D(array_1, array_2): + """ + Add two 1D kernel arrays of different size. + + The arrays are added with the centers lying upon each other. 
+ """ + if array_1.size > array_2.size: + new_array = array_1.copy() + center = array_1.size // 2 + slice_ = slice(center - array_2.size // 2, + center + array_2.size // 2 + 1) + new_array[slice_] += array_2 + return new_array + elif array_2.size > array_1.size: + new_array = array_2.copy() + center = array_2.size // 2 + slice_ = slice(center - array_1.size // 2, + center + array_1.size // 2 + 1) + new_array[slice_] += array_1 + return new_array + return array_2 + array_1 + + +def add_kernel_arrays_2D(array_1, array_2): + """ + Add two 2D kernel arrays of different size. + + The arrays are added with the centers lying upon each other. + """ + if array_1.size > array_2.size: + new_array = array_1.copy() + center = [axes_size // 2 for axes_size in array_1.shape] + slice_x = slice(center[1] - array_2.shape[1] // 2, + center[1] + array_2.shape[1] // 2 + 1) + slice_y = slice(center[0] - array_2.shape[0] // 2, + center[0] + array_2.shape[0] // 2 + 1) + new_array[slice_y, slice_x] += array_2 + return new_array + elif array_2.size > array_1.size: + new_array = array_2.copy() + center = [axes_size // 2 for axes_size in array_2.shape] + slice_x = slice(center[1] - array_1.shape[1] // 2, + center[1] + array_1.shape[1] // 2 + 1) + slice_y = slice(center[0] - array_1.shape[0] // 2, + center[0] + array_1.shape[0] // 2 + 1) + new_array[slice_y, slice_x] += array_1 + return new_array + return array_2 + array_1 + + +def discretize_model(model, x_range, y_range=None, mode='center', factor=10): + """ + Function to evaluate analytical model functions on a grid. + + So far the function can only deal with pixel coordinates. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` or callable. + Analytic model function to be discretized. Callables, which are not an + instances of `~astropy.modeling.FittableModel` are passed to + `~astropy.modeling.custom_model` and then evaluated. + x_range : tuple + x range in which the model is evaluated. The difference between the + upper an lower limit must be a whole number, so that the output array + size is well defined. + y_range : tuple, optional + y range in which the model is evaluated. The difference between the + upper an lower limit must be a whole number, so that the output array + size is well defined. Necessary only for 2D models. + mode : str, optional + One of the following modes: + * ``'center'`` (default) + Discretize model by taking the value + at the center of the bin. + * ``'linear_interp'`` + Discretize model by linearly interpolating + between the values at the corners of the bin. + For 2D models interpolation is bilinear. + * ``'oversample'`` + Discretize model by taking the average + on an oversampled grid. + * ``'integrate'`` + Discretize model by integrating the model + over the bin using `scipy.integrate.quad`. + Very slow. + factor : float or int + Factor of oversampling. Default = 10. + + Returns + ------- + array : `numpy.array` + Model value array + + Notes + ----- + The ``oversample`` mode allows to conserve the integral on a subpixel + scale. Here is the example of a normalized Gaussian1D: + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + import numpy as np + from astropy.modeling.models import Gaussian1D + from astropy.convolution.utils import discretize_model + gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5) + y_center = discretize_model(gauss_1D, (-2, 3), mode='center') + y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp') + y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample') + plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum())) + plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum())) + plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum())) + plt.xlabel('pixels') + plt.ylabel('value') + plt.legend() + plt.show() + + + """ + if not callable(model): + raise TypeError('Model must be callable.') + if not isinstance(model, FittableModel): + model = custom_model(model)() + ndim = model.n_inputs + if ndim > 2: + raise ValueError('discretize_model only supports 1-d and 2-d models.') + + if not float(np.diff(x_range)).is_integer(): + raise ValueError("The difference between the upper an lower limit of" + " 'x_range' must be a whole number.") + + if y_range: + if not float(np.diff(y_range)).is_integer(): + raise ValueError("The difference between the upper an lower limit of" + " 'y_range' must be a whole number.") + + if ndim == 2 and y_range is None: + raise ValueError("y range not specified, but model is 2-d") + if ndim == 1 and y_range is not None: + raise ValueError("y range specified, but model is only 1-d.") + if mode == "center": + if ndim == 1: + return discretize_center_1D(model, x_range) + elif ndim == 2: + return discretize_center_2D(model, x_range, y_range) + elif mode == "linear_interp": + if ndim == 1: + return discretize_linear_1D(model, x_range) + if ndim == 2: + return discretize_bilinear_2D(model, x_range, y_range) + elif mode == "oversample": + if ndim == 1: + return discretize_oversample_1D(model, x_range, factor) + if ndim == 2: + return discretize_oversample_2D(model, x_range, y_range, factor) + elif mode == "integrate": + if ndim == 1: + return discretize_integrate_1D(model, x_range) + if ndim == 2: + return discretize_integrate_2D(model, x_range, y_range) + else: + raise DiscretizationError('Invalid mode.') + + +def discretize_center_1D(model, x_range): + """ + Discretize model by taking the value at the center of the bin. + """ + x = np.arange(*x_range) + return model(x) + + +def discretize_center_2D(model, x_range, y_range): + """ + Discretize model by taking the value at the center of the pixel. + """ + x = np.arange(*x_range) + y = np.arange(*y_range) + x, y = np.meshgrid(x, y) + return model(x, y) + + +def discretize_linear_1D(model, x_range): + """ + Discretize model by performing a linear interpolation. + """ + # Evaluate model 0.5 pixel outside the boundaries + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + values_intermediate_grid = model(x) + return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1]) + + +def discretize_bilinear_2D(model, x_range, y_range): + """ + Discretize model by performing a bilinear interpolation. 
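+
+    The model is evaluated on the grid of pixel corners (shifted by half a
+    pixel from the requested ranges) and each output pixel is the mean of
+    its four corner values.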
+ """ + # Evaluate model 0.5 pixel outside the boundaries + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) + x, y = np.meshgrid(x, y) + values_intermediate_grid = model(x, y) + + # Mean in y direction + values = 0.5 * (values_intermediate_grid[1:, :] + + values_intermediate_grid[:-1, :]) + # Mean in x direction + values = 0.5 * (values[:, 1:] + + values[:, :-1]) + return values + + +def discretize_oversample_1D(model, x_range, factor=10): + """ + Discretize model by taking the average on an oversampled grid. + """ + # Evaluate model on oversampled grid + x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor), + x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + + values = model(x) + + # Reshape and compute mean + values = np.reshape(values, (x.size // factor, factor)) + return values.mean(axis=1)[:-1] + + +def discretize_oversample_2D(model, x_range, y_range, factor=10): + """ + Discretize model by taking the average on an oversampled grid. + """ + # Evaluate model on oversampled grid + x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor), + x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + + y = np.arange(y_range[0] - 0.5 * (1 - 1 / factor), + y_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + x_grid, y_grid = np.meshgrid(x, y) + values = model(x_grid, y_grid) + + # Reshape and compute mean + shape = (y.size // factor, factor, x.size // factor, factor) + values = np.reshape(values, shape) + return values.mean(axis=3).mean(axis=1)[:-1, :-1] + + +def discretize_integrate_1D(model, x_range): + """ + Discretize model by integrating numerically the model over the bin. + """ + from scipy.integrate import quad + # Set up grid + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + values = np.array([]) + + # Integrate over all bins + for i in range(x.size - 1): + values = np.append(values, quad(model, x[i], x[i + 1])[0]) + return values + + +def discretize_integrate_2D(model, x_range, y_range): + """ + Discretize model by integrating the model over the pixel. + """ + from scipy.integrate import dblquad + # Set up grid + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) + values = np.empty((y.size - 1, x.size - 1)) + + # Integrate over all pixels + for i in range(x.size - 1): + for j in range(y.size - 1): + values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1], + lambda x: y[j], lambda x: y[j + 1])[0] + return values diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..109a1f2844726304c8d929279f3c90071487d5d6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/convolution/utils.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63e432ec2614b47305e7d61cd8cbbf8ac4ae0951 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.py @@ -0,0 +1,13 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" astropy.cosmology contains classes and functions for cosmological +distance measures and other cosmology-related calculations. 
+ +See the `Astropy documentation +`_ for more +detailed usage examples and references. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .core import * +from .funcs import * diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac89415897483d2c8e673477a8725239a2bb8a24 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.py new file mode 100644 index 0000000000000000000000000000000000000000..f59782888c8f7ae62e57643e799b860ab90bb98c --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.py @@ -0,0 +1,2904 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern import six +from ..extern.six.moves import map + +import sys +from math import sqrt, pi, exp, log, floor +from abc import ABCMeta, abstractmethod + +import numpy as np + +from . import scalar_inv_efuncs + +from .. import constants as const +from .. import units as u +from ..utils import isiterable +from ..utils.compat.funcsigs import signature +from ..utils.state import ScienceState + +from . import parameters + +# Originally authored by Andrew Becker (becker@astro.washington.edu), +# and modified by Neil Crighton (neilcrighton@gmail.com) and Roban +# Kramer (robanhk@gmail.com). + +# Many of these adapted from Hogg 1999, astro-ph/9905116 +# and Linder 2003, PRL 90, 91301 + +__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM", + "Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM", + "default_cosmology"] + parameters.available + +__doctest_requires__ = {'*': ['scipy.integrate']} + +# Notes about speeding up integrals: +# --------------------------------- +# The supplied cosmology classes use a few tricks to speed +# up distance and time integrals. It is not necessary for +# anyone subclassing FLRW to use these tricks -- but if they +# do, such calculations may be a lot faster. +# The first, more basic, idea is that, in many cases, it's a big deal to +# provide explicit formulae for inv_efunc rather than simply +# setting up de_energy_scale -- assuming there is a nice expression. +# As noted above, almost all of the provided classes do this, and +# that template can pretty much be followed directly with the appropriate +# formula changes. +# The second, and more advanced, option is to also explicitly +# provide a scalar only version of inv_efunc. This results in a fairly +# large speedup (>10x in most cases) in the distance and age integrals, +# even if only done in python, because testing whether the inputs are +# iterable or pure scalars turns out to be rather expensive. To take +# advantage of this, the key thing is to explicitly set the +# instance variables self._inv_efunc_scalar and self._inv_efunc_scalar_args +# in the constructor for the subclass, where the latter are all the +# arguments except z to _inv_efunc_scalar. 
+# +# The provided classes do use this optimization, and in fact go +# even further and provide optimizations for no radiation, and for radiation +# with massless neutrinos coded in cython. Consult the subclasses for +# details, and scalar_inv_efuncs for the details. +# +# However, the important point is that it is -not- necessary to do this. + +# Some conversion constants -- useful to compute them once here +# and reuse in the initialization rather than have every object do them +# Note that the call to cgs is actually extremely expensive, +# so we actually skip using the units package directly, and +# hardwire the conversion from mks to cgs. This assumes that constants +# will always return mks by default -- if this is made faster for simple +# cases like this, it should be changed back. +# Note that the unit tests should catch it if this happens +H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s) +sec_to_Gyr = u.s.to(u.Gyr) +# const in critical density in cgs units (g cm^-3) +critdens_const = 3. / (8. * pi * const.G.value * 1000) +arcsec_in_radians = pi / (3600. * 180) +arcmin_in_radians = pi / (60. * 180) +# Radiation parameter over c^2 in cgs (g cm^-3 K^-4) +a_B_c2 = 4e-3 * const.sigma_sb.value / const.c.value ** 3 +# Boltzmann constant in eV / K +kB_evK = const.k_B.to(u.eV / u.K) + + +class CosmologyError(Exception): + pass + + +class Cosmology(object): + """ Placeholder for when a more general Cosmology class is + implemented. """ + + +@six.add_metaclass(ABCMeta) +class FLRW(Cosmology): + """ A class describing an isotropic and homogeneous + (Friedmann-Lemaitre-Robertson-Walker) cosmology. + + This is an abstract base class -- you can't instantiate + examples of this class, but must work with one of its + subclasses such as `LambdaCDM` or `wCDM`. + + Parameters + ---------- + + H0 : float or scalar `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. Note that this does not include + massive neutrinos. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Notes + ----- + Class instances are static -- you can't change the values + of the parameters. That is, all of the attributes above are + read only. 
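+
+    Examples
+    --------
+    A minimal usage sketch with one of the concrete subclasses (the
+    parameter values below are illustrative only)::
+
+        from astropy.cosmology import FlatLambdaCDM
+        cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
+        cosmo.age(0)                    # age of the universe now, in Gyr
+        cosmo.luminosity_distance(1.5)  # distance to z = 1.5, in Mpc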
+ """ + + def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + # all densities are in units of the critical density + self._Om0 = float(Om0) + if self._Om0 < 0.0: + raise ValueError("Matter density can not be negative") + self._Ode0 = float(Ode0) + if Ob0 is not None: + self._Ob0 = float(Ob0) + if self._Ob0 < 0.0: + raise ValueError("Baryonic density can not be negative") + if self._Ob0 > self._Om0: + raise ValueError("Baryonic density can not be larger than " + "total matter density") + self._Odm0 = self._Om0 - self._Ob0 + else: + self._Ob0 = None + self._Odm0 = None + + self._Neff = float(Neff) + if self._Neff < 0.0: + raise ValueError("Effective number of neutrinos can " + "not be negative") + self.name = name + + # Tcmb may have units + self._Tcmb0 = u.Quantity(Tcmb0, unit=u.K, dtype=np.float) + if not self._Tcmb0.isscalar: + raise ValueError("Tcmb0 is a non-scalar quantity") + + # Hubble parameter at z=0, km/s/Mpc + self._H0 = u.Quantity(H0, unit=u.km / u.s / u.Mpc, dtype=np.float) + if not self._H0.isscalar: + raise ValueError("H0 is a non-scalar quantity") + + # 100 km/s/Mpc * h = H0 (so h is dimensionless) + self._h = self._H0.value / 100. + # Hubble distance + self._hubble_distance = (const.c / self._H0).to(u.Mpc) + # H0 in s^-1; don't use units for speed + H0_s = self._H0.value * H0units_to_invs + # Hubble time; again, avoiding units package for speed + self._hubble_time = u.Quantity(sec_to_Gyr / H0_s, u.Gyr) + + # critical density at z=0 (grams per cubic cm) + cd0value = critdens_const * H0_s ** 2 + self._critical_density0 = u.Quantity(cd0value, u.g / u.cm ** 3) + + # Load up neutrino masses. Note: in Py2.x, floor is floating + self._nneutrinos = int(floor(self._Neff)) + + # We are going to share Neff between the neutrinos equally. 
+ # In detail this is not correct, but it is a standard assumption + # because properly calculating it is a) complicated b) depends + # on the details of the massive neutrinos (e.g., their weak + # interactions, which could be unusual if one is considering sterile + # neutrinos) + self._massivenu = False + if self._nneutrinos > 0 and self._Tcmb0.value > 0: + self._neff_per_nu = self._Neff / self._nneutrinos + + # We can't use the u.Quantity constructor as we do above + # because it doesn't understand equivalencies + if not isinstance(m_nu, u.Quantity): + raise ValueError("m_nu must be a Quantity") + + m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy()) + + # Now, figure out if we have massive neutrinos to deal with, + # and, if so, get the right number of masses + # It is worth the effort to keep track of massless ones separately + # (since they are quite easy to deal with, and a common use case + # is to set only one neutrino to have mass) + if m_nu.isscalar: + # Assume all neutrinos have the same mass + if m_nu.value == 0: + self._nmasslessnu = self._nneutrinos + self._nmassivenu = 0 + else: + self._massivenu = True + self._nmasslessnu = 0 + self._nmassivenu = self._nneutrinos + self._massivenu_mass = (m_nu.value * + np.ones(self._nneutrinos)) + else: + # Make sure we have the right number of masses + # -unless- they are massless, in which case we cheat a little + if m_nu.value.min() < 0: + raise ValueError("Invalid (negative) neutrino mass" + " encountered") + if m_nu.value.max() == 0: + self._nmasslessnu = self._nneutrinos + self._nmassivenu = 0 + else: + self._massivenu = True + if len(m_nu) != self._nneutrinos: + errstr = "Unexpected number of neutrino masses" + raise ValueError(errstr) + # Segregate out the massless ones + self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0]) + self._nmassivenu = self._nneutrinos - self._nmasslessnu + w = np.nonzero(m_nu.value > 0)[0] + self._massivenu_mass = m_nu[w] + + # Compute photon density, Tcmb, neutrino parameters + # Tcmb0=0 removes both photons and neutrinos, is handled + # as a special case for efficiency + if self._Tcmb0.value > 0: + # Compute photon density from Tcmb + self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\ + self._critical_density0.value + + # Compute Neutrino temperature + # The constant in front is (4/11)^1/3 -- see any + # cosmology book for an explanation -- for example, + # Weinberg 'Cosmology' p 154 eq (3.1.21) + self._Tnu0 = 0.7137658555036082 * self._Tcmb0 + + # Compute Neutrino Omega and total relativistic component + # for massive neutrinos. We also store a list version, + # since that is more efficient to do integrals with (perhaps + # surprisingly! But small python lists are more efficient + # than small numpy arrays). + if self._massivenu: + nu_y = self._massivenu_mass / (kB_evK * self._Tnu0) + self._nu_y = nu_y.value + self._nu_y_list = self._nu_y.tolist() + self._Onu0 = self._Ogamma0 * self.nu_relative_density(0) + else: + # This case is particularly simple, so do it directly + # The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature + # bit ^4 (blackbody energy density) times 7/8 for + # FD vs. BE statistics. + self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0 + + else: + self._Ogamma0 = 0.0 + self._Tnu0 = u.Quantity(0.0, u.K) + self._Onu0 = 0.0 + + # Compute curvature density + self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0 + + # Subclasses should override this reference if they provide + # more efficient scalar versions of inv_efunc. 
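+        # As a rough sketch (a hypothetical subclass, not code from this
+        # file), such an override in a subclass __init__ could look like:
+        #
+        #     self._inv_efunc_scalar = lambda z, Om0, Ode0: (
+        #         Om0 * (1. + z) ** 3 + Ode0) ** -0.5
+        #     self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
+        #
+        # i.e. a plain scalar function of z plus the extra arguments that
+        # _lookback_time_integrand_scalar and friends pass through to it.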
+ self._inv_efunc_scalar = self.inv_efunc + self._inv_efunc_scalar_args = () + + def _namelead(self): + """ Helper function for constructing __repr__""" + if self.name is None: + return "{0}(".format(self.__class__.__name__) + else: + return "{0}(name=\"{1}\", ".format(self.__class__.__name__, + self.name) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\ + "Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\ + "Ob0={7:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + # Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access. + # Note that we don't let these be set (so, obj.Om0 = value fails) + + @property + def H0(self): + """ Return the Hubble constant as an `~astropy.units.Quantity` at z=0""" + return self._H0 + + @property + def Om0(self): + """ Omega matter; matter density/critical density at z=0""" + return self._Om0 + + @property + def Ode0(self): + """ Omega dark energy; dark energy density/critical density at z=0""" + return self._Ode0 + + @property + def Ob0(self): + """ Omega baryon; baryonic matter density/critical density at z=0""" + return self._Ob0 + + @property + def Odm0(self): + """ Omega dark matter; dark matter density/critical density at z=0""" + return self._Odm0 + + @property + def Ok0(self): + """ Omega curvature; the effective curvature density/critical density + at z=0""" + return self._Ok0 + + @property + def Tcmb0(self): + """ Temperature of the CMB as `~astropy.units.Quantity` at z=0""" + return self._Tcmb0 + + @property + def Tnu0(self): + """ Temperature of the neutrino background as `~astropy.units.Quantity` at z=0""" + return self._Tnu0 + + @property + def Neff(self): + """ Number of effective neutrino species""" + return self._Neff + + @property + def has_massive_nu(self): + """ Does this cosmology have at least one massive neutrino species?""" + if self._Tnu0.value == 0: + return False + return self._massivenu + + @property + def m_nu(self): + """ Mass of neutrino species""" + if self._Tnu0.value == 0: + return None + if not self._massivenu: + # Only massless + return u.Quantity(np.zeros(self._nmasslessnu), u.eV, + dtype=np.float) + if self._nmasslessnu == 0: + # Only massive + return u.Quantity(self._massivenu_mass, u.eV, + dtype=np.float) + # A mix -- the most complicated case + numass = np.append(np.zeros(self._nmasslessnu), + self._massivenu_mass.value) + return u.Quantity(numass, u.eV, dtype=np.float) + + @property + def h(self): + """ Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]""" + return self._h + + @property + def hubble_time(self): + """ Hubble time as `~astropy.units.Quantity`""" + return self._hubble_time + + @property + def hubble_distance(self): + """ Hubble distance as `~astropy.units.Quantity`""" + return self._hubble_distance + + @property + def critical_density0(self): + """ Critical density as `~astropy.units.Quantity` at z=0""" + return self._critical_density0 + + @property + def Ogamma0(self): + """ Omega gamma; the density/critical density of photons at z=0""" + return self._Ogamma0 + + @property + def Onu0(self): + """ Omega nu; the density/critical density of neutrinos at z=0""" + return self._Onu0 + + def clone(self, **kwargs): + """ Returns a copy of this object, potentially with some changes. + + Returns + ------- + newcos : Subclass of FLRW + A new instance of this class with the specified changes. 
+ + Notes + ----- + This assumes that the values of all constructor arguments + are available as properties, which is true of all the provided + subclasses but may not be true of user-provided ones. You can't + change the type of class, so this can't be used to change between + flat and non-flat. If no modifications are requested, then + a reference to this object is returned. + + Examples + -------- + To make a copy of the Planck13 cosmology with a different Omega_m + and a new name: + + >>> from astropy.cosmology import Planck13 + >>> newcos = Planck13.clone(name="Modified Planck 2013", Om0=0.35) + """ + + # Quick return check, taking advantage of the + # immutability of cosmological objects + if len(kwargs) == 0: + return self + + # Get constructor arguments + arglist = signature(self.__init__).parameters.keys() + + # Build the dictionary of values used to construct this + # object. This -assumes- every argument to __init__ has a + # property. This is true of all the classes we provide, but + # maybe a user won't do that. So at least try to have a useful + # error message. + argdict = {} + for arg in arglist: + try: + val = getattr(self, arg) + argdict[arg] = val + except AttributeError: + # We didn't find a property -- complain usefully + errstr = "Object did not have property corresponding "\ + "to constructor argument '{}'; perhaps it is a "\ + "user provided subclass that does not do so" + raise AttributeError(errstr.format(arg)) + + # Now substitute in new arguments + for newarg in kwargs: + if newarg not in argdict: + errstr = "User provided argument '{}' not found in "\ + "constructor for this object" + raise AttributeError(errstr.format(newarg)) + argdict[newarg] = kwargs[newarg] + + return self.__class__(**argdict) + + @abstractmethod + def w(self, z): + """ The dark energy equation of state. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ----- + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. + + This must be overridden by subclasses. + """ + raise NotImplementedError("w(z) is not implemented") + + def Om(self, z): + """ Return the density parameter for non-relativistic matter + at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Om : ndarray, or float if input scalar + The density of non-relativistic matter relative to the critical + density at each redshift. + + Notes + ----- + This does not include neutrinos, even if non-relativistic + at the redshift of interest; see `Onu`. + """ + + if isiterable(z): + z = np.asarray(z) + return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 + + def Ob(self, z): + """ Return the density parameter for baryonic matter at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Ob : ndarray, or float if input scalar + The density of baryonic matter relative to the critical density at + each redshift. + + Raises + ------ + ValueError + If Ob0 is None. + """ + + if self._Ob0 is None: + raise ValueError("Baryon density not set for this cosmology") + if isiterable(z): + z = np.asarray(z) + return self._Ob0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 + + def Odm(self, z): + """ Return the density parameter for dark matter at redshift ``z``. 
+ + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Odm : ndarray, or float if input scalar + The density of non-relativistic dark matter relative to the critical + density at each redshift. + + Raises + ------ + ValueError + If Ob0 is None. + Notes + ----- + This does not include neutrinos, even if non-relativistic + at the redshift of interest. + """ + + if self._Odm0 is None: + raise ValueError("Baryonic density not set for this cosmology, " + "unclear meaning of dark matter density") + if isiterable(z): + z = np.asarray(z) + return self._Odm0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 + + def Ok(self, z): + """ Return the equivalent density parameter for curvature + at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Ok : ndarray, or float if input scalar + The equivalent density parameter for curvature at each redshift. + """ + + if isiterable(z): + z = np.asarray(z) + # Common enough case to be worth checking explicitly + if self._Ok0 == 0: + return np.zeros(np.asanyarray(z).shape, dtype=np.float) + else: + if self._Ok0 == 0: + return 0.0 + + return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2 + + def Ode(self, z): + """ Return the density parameter for dark energy at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Ode : ndarray, or float if input scalar + The density of non-relativistic matter relative to the critical + density at each redshift. + """ + + if isiterable(z): + z = np.asarray(z) + # Common case worth checking + if self._Ode0 == 0: + return np.zeros(np.asanyarray(z).shape, dtype=np.float) + else: + if self._Ode0 == 0: + return 0.0 + + return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2 + + def Ogamma(self, z): + """ Return the density parameter for photons at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Ogamma : ndarray, or float if input scalar + The energy density of photons relative to the critical + density at each redshift. + """ + + if isiterable(z): + z = np.asarray(z) + return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2 + + def Onu(self, z): + """ Return the density parameter for neutrinos at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Onu : ndarray, or float if input scalar + The energy density of neutrinos relative to the critical + density at each redshift. Note that this includes their + kinetic energy (if they have mass), so it is not equal to + the commonly used :math:`\\sum \\frac{m_{\\nu}}{94 eV}`, + which does not include kinetic energy. + """ + + if isiterable(z): + z = np.asarray(z) + if self._Onu0 == 0: + return np.zeros(np.asanyarray(z).shape, dtype=np.float) + else: + if self._Onu0 == 0: + return 0.0 + + return self.Ogamma(z) * self.nu_relative_density(z) + + def Tcmb(self, z): + """ Return the CMB temperature at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Tcmb : `~astropy.units.Quantity` + The temperature of the CMB in K. + """ + + if isiterable(z): + z = np.asarray(z) + return self._Tcmb0 * (1. + z) + + def Tnu(self, z): + """ Return the neutrino temperature at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Tnu : `~astropy.units.Quantity` + The temperature of the cosmic neutrino background in K. 
+ """ + + if isiterable(z): + z = np.asarray(z) + return self._Tnu0 * (1. + z) + + def nu_relative_density(self, z): + """ Neutrino density function relative to the energy density in + photons. + + Parameters + ---------- + z : array like + Redshift + + Returns + ------- + f : ndarray, or float if z is scalar + The neutrino density scaling factor relative to the density + in photons at each redshift + + Notes + ----- + The density in neutrinos is given by + + .. math:: + + \\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\, + f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\, + \\rho_{\\gamma} \\left( a \\right) + + where + + .. math:: + + f \\left(y\\right) = \\frac{120}{7 \\pi^4} + \\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}} + {e^x + 1} + + assuming that all neutrino species have the same mass. + If they have different masses, a similar term is calculated + for each one. Note that f has the asymptotic behavior :math:`f(0) = 1`. + This method returns :math:`0.2271 f` using an + analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18. + """ + + # Note that there is also a scalar-z-only cython implementation of + # this in scalar_inv_efuncs.pyx, so if you find a problem in this + # you need to update there too. + + # See Komatsu et al. 2011, eq 26 and the surrounding discussion + # for an explanation of what we are doing here. + # However, this is modified to handle multiple neutrino masses + # by computing the above for each mass, then summing + prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book + + # The massive and massless contribution must be handled separately + # But check for common cases first + if not self._massivenu: + if np.isscalar(z): + return prefac * self._Neff + else: + return prefac * self._Neff *\ + np.ones(np.asanyarray(z).shape, dtype=np.float) + + # These are purely fitting constants -- see the Komatsu paper + p = 1.83 + invp = 0.54644808743 # 1.0 / p + k = 0.3173 + + z = np.asarray(z) + curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1)) + rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp + rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu + + return prefac * self._neff_per_nu * rel_mass + + def _w_integrand(self, ln1pz): + """ Internal convenience function for w(z) integral.""" + + # See Linder 2003, PRL 90, 91301 eq (5) + # Assumes scalar input, since this should only be called + # inside an integral + + z = exp(ln1pz) - 1.0 + return 1.0 + self.w(z) + + def de_density_scale(self, z): + r""" Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, + and is given by + + .. math:: + + I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} } + \left[ 1 + w\left( a^{\prime} \right) \right] \right) + + It will generally helpful for subclasses to overload this method if + the integral can be done analytically for the particular dark + energy equation of state that they implement. + """ + + # This allows for an arbitrary w(z) following eq (5) of + # Linder 2003, PRL 90, 91301. The code here evaluates + # the integral numerically. However, most popular + # forms of w(z) are designed to make this integral analytic, + # so it is probably a good idea for subclasses to overload this + # method if an analytic form is available. 
+ # + # The integral we actually use (the one given in Linder) + # is rewritten in terms of z, so looks slightly different than the + # one in the documentation string, but it's the same thing. + + from scipy.integrate import quad + + if isiterable(z): + z = np.asarray(z) + ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0] + for redshift in z]) + return np.exp(3 * ival) + else: + ival = quad(self._w_integrand, 0, log(1 + z))[0] + return exp(3 * ival) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + + It is not necessary to override this method, but if de_density_scale + takes a particularly simple form, it may be advantageous to. + """ + + if isiterable(z): + z = np.asarray(z) + + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * self.de_density_scale(z)) + + def inv_efunc(self, z): + """Inverse of efunc. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the inverse Hubble constant. + """ + + # Avoid the function overhead by repeating code + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * self.de_density_scale(z))**(-0.5) + + def _lookback_time_integrand_scalar(self, z): + """ Integrand of the lookback time. + + Parameters + ---------- + z : float + Input redshift. + + Returns + ------- + I : float + The integrand for the lookback time + + References + ---------- + Eqn 30 from Hogg 1999. + """ + + args = self._inv_efunc_scalar_args + return self._inv_efunc_scalar(z, *args) / (1.0 + z) + + def lookback_time_integrand(self, z): + """ Integrand of the lookback time. + + Parameters + ---------- + z : float or array-like + Input redshift. + + Returns + ------- + I : float or array + The integrand for the lookback time + + References + ---------- + Eqn 30 from Hogg 1999. + """ + + if isiterable(z): + zp1 = 1.0 + np.asarray(z) + else: + zp1 = 1. + z + + return self.inv_efunc(z) / zp1 + + def _abs_distance_integrand_scalar(self, z): + """ Integrand of the absorption distance. + + Parameters + ---------- + z : float + Input redshift. + + Returns + ------- + X : float + The integrand for the absorption distance + + References + ---------- + See Hogg 1999 section 11. + """ + + args = self._inv_efunc_scalar_args + return (1.0 + z) ** 2 * self._inv_efunc_scalar(z, *args) + + def abs_distance_integrand(self, z): + """ Integrand of the absorption distance. + + Parameters + ---------- + z : float or array + Input redshift. + + Returns + ------- + X : float or array + The integrand for the absorption distance + + References + ---------- + See Hogg 1999 section 11. + """ + + if isiterable(z): + zp1 = 1.0 + np.asarray(z) + else: + zp1 = 1. + z + return zp1 ** 2 * self.inv_efunc(z) + + def H(self, z): + """ Hubble parameter (km/s/Mpc) at redshift ``z``. 
+ + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + H : `~astropy.units.Quantity` + Hubble parameter at each input redshift. + """ + + return self._H0 * self.efunc(z) + + def scale_factor(self, z): + """ Scale factor at redshift ``z``. + + The scale factor is defined as :math:`a = 1 / (1 + z)`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + a : ndarray, or float if input scalar + Scale factor at each input redshift. + """ + + if isiterable(z): + z = np.asarray(z) + + return 1. / (1. + z) + + def lookback_time(self, z): + """ Lookback time in Gyr to redshift ``z``. + + The lookback time is the difference between the age of the + Universe now and the age at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar + + Returns + ------- + t : `~astropy.units.Quantity` + Lookback time in Gyr to each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to a lookback time. + """ + + from scipy.integrate import quad + f = lambda red: quad(self._lookback_time_integrand_scalar, 0, red)[0] + return self._hubble_time * vectorize_if_needed(f, z) + + def lookback_distance(self, z): + """ + The lookback distance is the light travel time distance to a given + redshift. It is simply c * lookback_time. It may be used to calculate + the proper distance between two redshifts, e.g. for the mean free path + to ionizing radiation. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar + + Returns + ------- + d : `~astropy.units.Quantity` + Lookback distance in Mpc + """ + return (self.lookback_time(z) * const.c).to(u.Mpc) + + def age(self, z): + """ Age of the universe in Gyr at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + t : `~astropy.units.Quantity` + The age of the universe in Gyr at each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to an age. + """ + + from scipy.integrate import quad + f = lambda red: quad(self._lookback_time_integrand_scalar, + red, np.inf)[0] + return self._hubble_time * vectorize_if_needed(f, z) + + def critical_density(self, z): + """ Critical density in grams per cubic cm at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + rho : `~astropy.units.Quantity` + Critical density in g/cm^3 at each input redshift. + """ + + return self._critical_density0 * (self.efunc(z)) ** 2 + + def comoving_distance(self, z): + """ Comoving line-of-sight distance in Mpc at a given + redshift. + + The comoving distance along the line-of-sight between two + objects remains constant with time for objects in the Hubble + flow. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving distance in Mpc to each input redshift. + """ + + return self._comoving_distance_z1z2(0, z) + + def _comoving_distance_z1z2(self, z1, z2): + """ Comoving line-of-sight distance in Mpc between objects at + redshifts z1 and z2. + + The comoving distance along the line-of-sight between two + objects remains constant with time for objects in the Hubble + flow. + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving distance in Mpc between each input redshift. 
+ """ + + from scipy.integrate import quad + f = lambda z1, z2: quad(self._inv_efunc_scalar, z1, z2, + args=self._inv_efunc_scalar_args)[0] + return self._hubble_distance * vectorize_if_needed(f, z1, z2) + + def comoving_transverse_distance(self, z): + """ Comoving transverse distance in Mpc at a given redshift. + + This value is the transverse comoving distance at redshift ``z`` + corresponding to an angular separation of 1 radian. This is + the same as the comoving distance if omega_k is zero (as in + the current concordance lambda CDM model). + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving transverse distance in Mpc at each input redshift. + + Notes + ----- + This quantity also called the 'proper motion distance' in some + texts. + """ + + return self._comoving_transverse_distance_z1z2(0, z) + + def _comoving_transverse_distance_z1z2(self, z1, z2): + """Comoving transverse distance in Mpc between two redshifts. + + This value is the transverse comoving distance at redshift + ``z2`` as seen from redshift ``z1`` corresponding to an + angular separation of 1 radian. This is the same as the + comoving distance if omega_k is zero (as in the current + concordance lambda CDM model). + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving transverse distance in Mpc between input redshift. + + Notes + ----- + This quantity is also called the 'proper motion distance' in + some texts. + + """ + + Ok0 = self._Ok0 + dc = self._comoving_distance_z1z2(z1, z2) + if Ok0 == 0: + return dc + sqrtOk0 = sqrt(abs(Ok0)) + dh = self._hubble_distance + if Ok0 > 0: + return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value) + else: + return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value) + + def angular_diameter_distance(self, z): + """ Angular diameter distance in Mpc at a given redshift. + + This gives the proper (sometimes called 'physical') transverse + distance corresponding to an angle of 1 radian for an object + at redshift ``z``. + + Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles, + 1993, pp 325-327. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Angular diameter distance in Mpc at each input redshift. + """ + + if isiterable(z): + z = np.asarray(z) + + return self.comoving_transverse_distance(z) / (1. + z) + + def luminosity_distance(self, z): + """ Luminosity distance in Mpc at redshift ``z``. + + This is the distance to use when converting between the + bolometric flux from an object at redshift ``z`` and its + bolometric luminosity. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Luminosity distance in Mpc at each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to a luminosity distance. + + References + ---------- + Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62. + """ + + if isiterable(z): + z = np.asarray(z) + + return (1. + z) * self.comoving_transverse_distance(z) + + def angular_diameter_distance_z1z2(self, z1, z2): + """ Angular diameter distance between objects at 2 redshifts. + Useful for gravitational lensing. + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. z2 must be large than z1. 
+ + Returns + ------- + d : `~astropy.units.Quantity`, shape (N,) or single if input scalar + The angular diameter distance between each input redshift + pair. + + """ + + z1 = np.asanyarray(z1) + z2 = np.asanyarray(z2) + return self._comoving_transverse_distance_z1z2(z1, z2) / (1. + z2) + + def absorption_distance(self, z): + """ Absorption distance at redshift ``z``. + + This is used to calculate the number of objects with some + cross section of absorption and number density intersecting a + sightline per unit redshift path. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : float or ndarray + Absorption distance (dimensionless) at each input redshift. + + References + ---------- + Hogg 1999 Section 11. (astro-ph/9905116) + Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B + """ + + from scipy.integrate import quad + f = lambda red: quad(self._abs_distance_integrand_scalar, 0, red)[0] + return vectorize_if_needed(f, z) + + def distmod(self, z): + """ Distance modulus at redshift ``z``. + + The distance modulus is defined as the (apparent magnitude - + absolute magnitude) for an object at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + distmod : `~astropy.units.Quantity` + Distance modulus at each input redshift, in magnitudes + + See Also + -------- + z_at_value : Find the redshift corresponding to a distance modulus. + """ + + # Remember that the luminosity distance is in Mpc + # Abs is necessary because in certain obscure closed cosmologies + # the distance modulus can be negative -- which is okay because + # it enters as the square. + val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0 + return u.Quantity(val, u.mag) + + def comoving_volume(self, z): + """ Comoving volume in cubic Mpc at redshift ``z``. + + This is the volume of the universe encompassed by redshifts less + than ``z``. For the case of omega_k = 0 it is a sphere of radius + `comoving_distance` but it is less intuitive + if omega_k is not 0. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + V : `~astropy.units.Quantity` + Comoving volume in :math:`Mpc^3` at each input redshift. + """ + + Ok0 = self._Ok0 + if Ok0 == 0: + return 4. / 3. * pi * self.comoving_distance(z) ** 3 + + dh = self._hubble_distance.value # .value for speed + dm = self.comoving_transverse_distance(z).value + term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3 + term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2) + term3 = sqrt(abs(Ok0)) * dm / dh + + if Ok0 > 0: + return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3)) + else: + return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3)) + + def differential_comoving_volume(self, z): + """Differential comoving volume at redshift z. + + Useful for calculating the effective comoving volume. + For example, allows for integration over a comoving volume + that has a sensitivity function that changes with redshift. + The total comoving volume is given by integrating + differential_comoving_volume to redshift z + and multiplying by a solid angle. + + Parameters + ---------- + z : array-like + Input redshifts. 
+ + Returns + ------- + dV : `~astropy.units.Quantity` + Differential comoving volume per redshift per steradian at + each input redshift.""" + dh = self._hubble_distance + da = self.angular_diameter_distance(z) + zp1 = 1.0 + z + return dh * ((zp1 * da) ** 2.0) / u.Quantity(self.efunc(z), + u.steradian) + + def kpc_comoving_per_arcmin(self, z): + """ Separation in transverse comoving kpc corresponding to an + arcminute at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + The distance in comoving kpc corresponding to an arcmin at each + input redshift. + """ + return (self.comoving_transverse_distance(z).to(u.kpc) * + arcmin_in_radians / u.arcmin) + + def kpc_proper_per_arcmin(self, z): + """ Separation in transverse proper kpc corresponding to an + arcminute at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + The distance in proper kpc corresponding to an arcmin at each + input redshift. + """ + return (self.angular_diameter_distance(z).to(u.kpc) * + arcmin_in_radians / u.arcmin) + + def arcsec_per_kpc_comoving(self, z): + """ Angular separation in arcsec corresponding to a comoving kpc + at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + theta : `~astropy.units.Quantity` + The angular separation in arcsec corresponding to a comoving kpc + at each input redshift. + """ + return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) * + arcsec_in_radians) + + def arcsec_per_kpc_proper(self, z): + """ Angular separation in arcsec corresponding to a proper kpc at + redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + theta : `~astropy.units.Quantity` + The angular separation in arcsec corresponding to a proper kpc + at each input redshift. + """ + return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) * + arcsec_in_radians) + + +class LambdaCDM(FLRW): + """FLRW cosmology with a cosmological constant and curvature. + + This has no additional attributes beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of the cosmological constant in units of + the critical density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. 
If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import LambdaCDM + >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list) + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = -1`. + """ + + if np.isscalar(z): + return -1.0 + else: + return -1.0 * np.ones(np.asanyarray(z).shape, dtype=np.float) + + def de_density_scale(self, z): + """ Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by :math:`I = 1`. + """ + + if np.isscalar(z): + return 1. + else: + return np.ones(np.asanyarray(z).shape, dtype=np.float) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + """ + + if isiterable(z): + z = np.asarray(z) + + # We override this because it takes a particularly simple + # form for a cosmological constant + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / + E`. 
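+
+        For a cosmological constant this takes the closed form
+        :math:`E(z) = \sqrt{(1+z)^2 [(\Omega_{r,0}(1+z) + \Omega_{m,0})(1+z)
+        + \Omega_{k,0}] + \Omega_{de,0}}`, and this method returns
+        :math:`1/E(z)`.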
+ """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)**(-0.5) + + +class FlatLambdaCDM(LambdaCDM): + """FLRW cosmology with a cosmological constant and no curvature. + + This has no additional attributes beyond those of FLRW. + + Parameters + ---------- + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import FlatLambdaCDM + >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + LambdaCDM.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + # Do some twiddling after the fact to get flatness + self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 + self._Ok0 = 0.0 + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0 + self._Onu0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. 
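+
+        For a flat cosmology with a cosmological constant this reduces to
+        :math:`E(z) = \\sqrt{(1+z)^3 (\\Omega_{r,0}(1+z) + \\Omega_{m,0})
+        + \\Omega_{de,0}}`, which is what is evaluated below.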
+ """ + + if isiterable(z): + z = np.asarray(z) + + # We override this because it takes a particularly simple + # form for a cosmological constant + Om0, Ode0 = self._Om0, self._Ode0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0) + + def inv_efunc(self, z): + r"""Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0 = self._Om0, self._Ode0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + return (zp1 ** 3 * (Or * zp1 + Om0) + Ode0)**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\ + "Neff={4:.3g}, m_nu={5}, Ob0={6:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class wCDM(FLRW): + """FLRW cosmology with a constant dark energy equation of state + and curvature. + + This has one additional attribute beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + w0 : float, optional + Dark energy equation of state at all redshifts. This is + pressure/density for dark energy in units where c=1. A cosmological + constant has w0=-1.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import wCDM + >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._w0 = float(w0) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. 
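+        # The branches below select a compiled scalar 1/E(z) implementation
+        # specialized to the species actually present: no relativistic
+        # species at all (Tcmb0 == 0), photons plus massless neutrinos only,
+        # or massive neutrinos, passing just the parameters each case needs.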
+ if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._w0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._w0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0) + + @property + def w0(self): + """ Dark energy equation of state""" + return self._w0 + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = w_0`. + """ + + if np.isscalar(z): + return self._w0 + else: + return self._w0 * np.ones(np.asanyarray(z).shape, dtype=np.float) + + def de_density_scale(self, z): + """ Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by + :math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}` + """ + + if isiterable(z): + z = np.asarray(z) + return (1. + z) ** (3. * (1. + self._w0)) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * zp1 ** (3. * (1. + w0))) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * zp1 ** (3. * (1. 
+ w0)))**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\ + "Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7}, Ob0={8:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Ode0, self._w0, self._Tcmb0, self._Neff, + self.m_nu, _float_or_none(self._Ob0)) + + +class FlatwCDM(wCDM): + """FLRW cosmology with a constant dark energy equation of state + and no spatial curvature. + + This has one additional attribute beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + w0 : float, optional + Dark energy equation of state at all redshifts. This is + pressure/density for dark energy in units where c=1. A cosmological + constant has w0=-1.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import FlatwCDM + >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, w0=-1., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + wCDM.__init__(self, H0, Om0, 0.0, w0, Tcmb0, Neff, m_nu, + name=name, Ob0=Ob0) + # Do some twiddling after the fact to get flatness + self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 + self._Ok0 = 0.0 + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._w0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0 + self._Onu0, + self._w0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. 
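+
+        For a flat cosmology with constant :math:`w` this reduces to
+        :math:`E(z) = \\sqrt{(1+z)^3 (\\Omega_{r,0}(1+z) + \\Omega_{m,0})
+        + \\Omega_{de,0} (1+z)^{3 (1 + w_0)}}`, which is what is evaluated
+        below.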
+ """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1. + z + + return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + + Ode0 * zp1 ** (3. * (1 + w0))) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1. + z + + return (zp1 ** 3 * (Or * zp1 + Om0) + + Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\ + "Neff={5:.3g}, m_nu={6}, Ob0={7:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class w0waCDM(FLRW): + """FLRW cosmology with a CPL dark energy equation of state and curvature. + + The equation for the dark energy equation of state uses the + CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. + D10, 213 (2001) and Linder PRL 90, 91301 (2003): + :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. + + Parameters + ---------- + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + w0 : float, optional + Dark energy equation of state at z=0 (a=1). This is pressure/density + for dark energy in units where c=1. + + wa : float, optional + Negative derivative of the dark energy equation of state with respect + to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. 
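+
+    Notes
+    -----
+    With this parametrization the dark energy density evolves as
+    :math:`\\rho_{de}(z) = \\rho_{de,0} (1+z)^{3 (1 + w_0 + w_a)}
+    \\exp(-3 w_a z / (1+z))`; see `de_density_scale`.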
+ + Examples + -------- + >>> from astropy.cosmology import w0waCDM + >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._w0 = float(w0) + self._wa = float(wa) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._w0, self._wa) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._w0, self._wa) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0, + self._wa) + + @property + def w0(self): + """ Dark energy equation of state at z=0""" + return self._w0 + + @property + def wa(self): + """ Negative derivative of dark energy equation of state w.r.t. a""" + return self._wa + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`. + """ + + if isiterable(z): + z = np.asarray(z) + + return self._w0 + self._wa * z / (1.0 + z) + + def de_density_scale(self, z): + r""" Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by + + .. math:: + + I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)} + \exp \left(-3 w_a \frac{z}{1+z}\right) + + """ + if isiterable(z): + z = np.asarray(z) + zp1 = 1.0 + z + return zp1 ** (3 * (1 + self._w0 + self._wa)) * \ + np.exp(-3 * self._wa * z / zp1) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\ + "Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\ + "Neff={7:.3g}, m_nu={8}, Ob0={9:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Ode0, self._w0, self._wa, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class Flatw0waCDM(w0waCDM): + """FLRW cosmology with a CPL dark energy equation of state and no + curvature. + + The equation for the dark energy equation of state uses the + CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. + D10, 213 (2001) and Linder PRL 90, 91301 (2003): + :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. 
If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + w0 : float, optional + Dark energy equation of state at z=0 (a=1). This is pressure/density + for dark energy in units where c=1. + + wa : float, optional + Negative derivative of the dark energy equation of state with respect + to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import Flatw0waCDM + >>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, w0=-1., wa=0., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + w0waCDM.__init__(self, H0, Om0, 0.0, w0=w0, wa=wa, Tcmb0=Tcmb0, + Neff=Neff, m_nu=m_nu, name=name, Ob0=Ob0) + # Do some twiddling after the fact to get flatness + self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 + self._Ok0 = 0.0 + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._w0, self._wa) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0 + self._Onu0, + self._w0, self._wa) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0, + self._wa) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\ + "w0={3:.3g}, Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\ + "Ob0={7:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class wpwaCDM(FLRW): + """FLRW cosmology with a CPL dark energy equation of state, a pivot + redshift, and curvature. + + The equation for the dark energy equation of state uses the + CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. + D10, 213 (2001) and Linder PRL 90, 91301 (2003), but modified + to have a pivot redshift as in the findings of the Dark Energy + Task Force (Albrecht et al. 
arXiv:0901.0721 (2009)): + :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + wp : float, optional + Dark energy equation of state at the pivot redshift zp. This is + pressure/density for dark energy in units where c=1. + + wa : float, optional + Negative derivative of the dark energy equation of state with respect + to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0. + + zp : float, optional + Pivot redshift -- the redshift where w(z) = wp + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import wpwaCDM + >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, wp=-1., wa=0., zp=0, + Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), + Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._wp = float(wp) + self._wa = float(wa) + self._zp = float(zp) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + apiv = 1.0 / (1.0 + self._zp) + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._wp, apiv, self._wa) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._wp, apiv, self._wa) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._wp, + apiv, self._wa) + + @property + def wp(self): + """ Dark energy equation of state at the pivot redshift zp""" + return self._wp + + @property + def wa(self): + """ Negative derivative of dark energy equation of state w.r.t. 
a""" + return self._wa + + @property + def zp(self): + """ The pivot redshift, where w(z) = wp""" + return self._zp + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/1+z` + and :math:`a_p = 1 / 1 + z_p`. + """ + + if isiterable(z): + z = np.asarray(z) + + apiv = 1.0 / (1.0 + self._zp) + return self._wp + self._wa * (apiv - 1.0 / (1. + z)) + + def de_density_scale(self, z): + r""" Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by + + .. math:: + + a_p = \frac{1}{1 + z_p} + + I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)} + \exp \left(-3 w_a \frac{z}{1+z}\right) + """ + + if isiterable(z): + z = np.asarray(z) + zp1 = 1. + z + apiv = 1. / (1. + self._zp) + return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \ + np.exp(-3. * self._wa * z / zp1) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, wp={4:.3g}, "\ + "wa={5:.3g}, zp={6:.3g}, Tcmb0={7:.4g}, Neff={8:.3g}, "\ + "m_nu={9}, Ob0={10:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Ode0, self._wp, self._wa, self._zp, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class w0wzCDM(FLRW): + """FLRW cosmology with a variable dark energy equation of state + and curvature. + + The equation for the dark energy equation of state uses the + simple form: :math:`w(z) = w_0 + w_z z`. + + This form is not recommended for z > 1. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + w0 : float, optional + Dark energy equation of state at z=0. This is pressure/density for + dark energy in units where c=1. + + wz : float, optional + Derivative of the dark energy equation of state with respect to z. + A cosmological constant has w0=-1.0 and wz=0.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. 
Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import w0wzCDM + >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, w0=-1., wz=0., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, + name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._w0 = float(w0) + self._wz = float(wz) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._w0, self._wz) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._w0, self._wz) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0, + self._wz) + + @property + def w0(self): + """ Dark energy equation of state at z=0""" + return self._w0 + + @property + def wz(self): + """ Derivative of the dark energy equation of state w.r.t. z""" + return self._wz + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is given by + :math:`w(z) = w_0 + w_z z`. + """ + + if isiterable(z): + z = np.asarray(z) + + return self._w0 + self._wz * z + + def de_density_scale(self, z): + r""" Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by + + .. math:: + + I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)} + \exp \left(-3 w_z z\right) + """ + + if isiterable(z): + z = np.asarray(z) + zp1 = 1. + z + return zp1 ** (3. * (1. + self._w0 - self._wz)) *\ + np.exp(-3. 
* self._wz * z) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\ + "Ode0={3:.3g}, w0={4:.3g}, wz={5:.3g} Tcmb0={6:.4g}, "\ + "Neff={7:.3g}, m_nu={8}, Ob0={9:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Ode0, self._w0, self._wz, self._Tcmb0, + self._Neff, self.m_nu, _float_or_none(self._Ob0)) + + +def _float_or_none(x, digits=3): + """ Helper function to format a variable that can be a float or None""" + if x is None: + return str(x) + fmtstr = "{0:.{digits}g}".format(x, digits=digits) + return fmtstr.format(x) + + +def vectorize_if_needed(func, *x): + """ Helper function to vectorize functions on array inputs""" + if any(map(isiterable, x)): + return np.vectorize(func)(*x) + else: + return func(*x) + + +# Pre-defined cosmologies. This loops over the parameter sets in the +# parameters module and creates a LambdaCDM or FlatLambdaCDM instance +# with the same name as the parameter set in the current module's namespace. +# Note this assumes all the cosmologies in parameters are LambdaCDM, +# which is true at least as of this writing. + +for key in parameters.available: + par = getattr(parameters, key) + if par['flat']: + cosmo = FlatLambdaCDM(par['H0'], par['Om0'], Tcmb0=par['Tcmb0'], + Neff=par['Neff'], + m_nu=u.Quantity(par['m_nu'], u.eV), + name=key, + Ob0=par['Ob0']) + docstr = "{} instance of FlatLambdaCDM cosmology\n\n(from {})" + cosmo.__doc__ = docstr.format(key, par['reference']) + else: + cosmo = LambdaCDM(par['H0'], par['Om0'], par['Ode0'], + Tcmb0=par['Tcmb0'], Neff=par['Neff'], + m_nu=u.Quantity(par['m_nu'], u.eV), name=key, + Ob0=par['Ob0']) + docstr = "{} instance of LambdaCDM cosmology\n\n(from {})" + cosmo.__doc__ = docstr.format(key, par['reference']) + setattr(sys.modules[__name__], key, cosmo) + +# don't leave these variables floating around in the namespace +del key, par, cosmo + +######################################################################### +# The science state below contains the current cosmology. +######################################################################### + + +class default_cosmology(ScienceState): + """ + The default cosmology to use. To change it:: + + >>> from astropy.cosmology import default_cosmology, WMAP7 + >>> with default_cosmology.set(WMAP7): + ... # WMAP7 cosmology in effect + + Or, you may use a string:: + + >>> with default_cosmology.set('WMAP7'): + ... # WMAP7 cosmology in effect + """ + _value = 'WMAP9' + + @staticmethod + def get_cosmology_from_string(arg): + """ Return a cosmology instance from a string. + """ + if arg == 'no_default': + cosmo = None + else: + try: + cosmo = getattr(sys.modules[__name__], arg) + except AttributeError: + s = "Unknown cosmology '{}'. 
Valid cosmologies:\n{}".format( + arg, parameters.available) + raise ValueError(s) + return cosmo + + @classmethod + def validate(cls, value): + if value is None: + value = 'Planck15' + if isinstance(value, six.string_types): + return cls.get_cosmology_from_string(value) + elif isinstance(value, Cosmology): + return value + else: + raise TypeError("default_cosmology must be a string or Cosmology instance.") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a71300dc9fa2029665d7274559c2219b2769e53 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/core.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..899ad39a6859f0f781ede11536f29857f85b9e93 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.py @@ -0,0 +1,146 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Convenience functions for `astropy.cosmology`. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import warnings +import numpy as np + +from .core import CosmologyError +from ..units import Quantity + +__all__ = ['z_at_value'] + +__doctest_requires__ = {'*': ['scipy.integrate']} + + +def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500): + """ Find the redshift ``z`` at which ``func(z) = fval``. + + This finds the redshift at which one of the cosmology functions or + methods (for example Planck13.distmod) is equal to a known value. + + .. warning:: + Make sure you understand the behaviour of the function that you + are trying to invert! Depending on the cosmology, there may not + be a unique solution. For example, in the standard Lambda CDM + cosmology, there are two redshifts which give an angular + diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8. To force + ``z_at_value`` to find the solution you are interested in, use the + ``zmin`` and ``zmax`` keywords to limit the search range (see the + example below). + + Parameters + ---------- + func : function or method + A function that takes a redshift as input. + fval : astropy.Quantity instance + The value of ``func(z)``. + zmin : float, optional + The lower search limit for ``z``. Beware of divergences + in some cosmological functions, such as distance moduli, + at z=0 (default 1e-8). + zmax : float, optional + The upper search limit for ``z`` (default 1000). + ztol : float, optional + The relative error in ``z`` acceptable for convergence. + maxfun : int, optional + The maximum number of function evaluations allowed in the + optimization routine (default 500). + + Returns + ------- + z : float + The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) = + fval`` within ``ztol``. + + Notes + ----- + This works for any arbitrary input cosmology, but is inefficient + if you want to invert a large number of values for the same + cosmology. In this case, it is faster to instead generate an array + of values at many closely-spaced redshifts that cover the relevant + redshift range, and then use interpolation to find the redshift at + each value you're interested in. 
For example, to efficiently find + the redshifts corresponding to 10^6 values of the distance modulus + in a Planck13 cosmology, you could do the following: + + >>> import astropy.units as u + >>> from astropy.cosmology import Planck13, z_at_value + + Generate 10^6 distance moduli between 24 and 43 for which we + want to find the corresponding redshifts: + + >>> Dvals = (24 + np.random.rand(1e6) * 20) * u.mag + + Make a grid of distance moduli covering the redshift range we + need using 50 equally log-spaced values between zmin and + zmax. We use log spacing to adequately sample the steep part of + the curve at low distance moduli: + + >>> zmin = z_at_value(Planck13.distmod, Dvals.min()) + >>> zmax = z_at_value(Planck13.distmod, Dvals.max()) + >>> zgrid = np.logspace(np.log10(zmin), np.log10(zmax), 50) + >>> Dgrid = Planck13.distmod(zgrid) + + Finally interpolate to find the redshift at each distance modulus: + + >>> zvals = np.interp(Dvals.value, zgrid, Dgrid.value) + + Examples + -------- + >>> import astropy.units as u + >>> from astropy.cosmology import Planck13, z_at_value + + The age and lookback time are monotonic with redshift, and so a + unique solution can be found: + + >>> z_at_value(Planck13.age, 2 * u.Gyr) + 3.19812268... + + The angular diameter is not monotonic however, and there are two + redshifts that give a value of 1500 Mpc. Use the zmin and zmax keywords + to find the one you're interested in: + + >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmax=1.5) + 0.6812769577... + >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5) + 3.7914913242... + + Also note that the luminosity distance and distance modulus (two + other commonly inverted quantities) are monotonic in flat and open + universes, but not in closed universes. + """ + from scipy.optimize import fminbound + + fval_zmin = func(zmin) + fval_zmax = func(zmax) + if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval): + warnings.warn("""\ +fval is not bracketed by func(zmin) and func(zmax). 
This means either +there is no solution, or that there is more than one solution between +zmin and zmax satisfying fval = func(z).""") + + if isinstance(fval_zmin, Quantity): + val = fval.to_value(fval_zmin.unit) + f = lambda z: abs(func(z).value - val) + else: + f = lambda z: abs(func(z) - fval) + + zbest, resval, ierr, ncall = fminbound(f, zmin, zmax, maxfun=maxfun, + full_output=1, xtol=ztol) + + if ierr != 0: + warnings.warn('Maximum number of function calls ({}) reached'.format( + ncall)) + + if np.allclose(zbest, zmax): + raise CosmologyError("Best guess z is very close the upper z limit.\n" + "Try re-running with a different zmax.") + elif np.allclose(zbest, zmin): + raise CosmologyError("Best guess z is very close the lower z limit.\n" + "Try re-running with a different zmin.") + + return zbest diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fd1071be927cfd43c4851f2415daf70b9f540a9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/funcs.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..d86831fdc9942bae790eb34fc07e39dfb522bb02 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.py @@ -0,0 +1,148 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" This module contains dictionaries with sets of parameters for a +given cosmology. + +Each cosmology has the following parameters defined: + + ========== ===================================== + Oc0 Omega cold dark matter at z=0 + Ob0 Omega baryon at z=0 + Om0 Omega matter at z=0 + flat Is this assumed flat? If not, Ode0 must be specified + Ode0 Omega dark energy at z=0 if flat is False + H0 Hubble parameter at z=0 in km/s/Mpc + n Density perturbation spectral index + Tcmb0 Current temperature of the CMB + Neff Effective number of neutrino species + sigma8 Density perturbation amplitude + tau Ionisation optical depth + z_reion Redshift of hydrogen reionisation + t0 Age of the universe in Gyr + reference Reference for the parameters + ========== ===================================== + +The list of cosmologies available are given by the tuple +`available`. Current cosmologies available: + +Planck 2015 (Planck15) parameters from Planck Collaboration 2016, A&A, 594, A13 + (Paper XIII), Table 4 (TT, TE, EE + lowP + lensing + ext) + +Planck 2013 (Planck13) parameters from Planck Collaboration 2014, A&A, 571, A16 + (Paper XVI), Table 5 (Planck + WP + highL + BAO) + +WMAP 9 year (WMAP9) parameters from Hinshaw et al. 2013, ApJS, 208, 19, +doi: 10.1088/0067-0049/208/2/19. Table 4 (WMAP9 + eCMB + BAO + H0) + +WMAP 7 year (WMAP7) parameters from Komatsu et al. 2011, ApJS, 192, 18, +doi: 10.1088/0067-0049/192/2/18. Table 1 (WMAP + BAO + H0 ML). + +WMAP 5 year (WMAP5) parameters from Komatsu et al. 2009, ApJS, 180, 330, +doi: 10.1088/0067-0049/180/2/330. Table 1 (WMAP + BAO + SN ML). 
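+
+Each of these parameter sets is turned into a LambdaCDM or FlatLambdaCDM
+instance of the same name when `astropy.cosmology` is imported, so a typical
+usage sketch is::
+
+  >>> from astropy.cosmology import WMAP7
+  >>> WMAP7.H0  # doctest: +SKIP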
+ +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +# Note: if you add a new cosmology, please also update the table +# in the 'Built-in Cosmologies' section of astropy/docs/cosmology/index.rst +# in addition to the list above. You also need to add them to the 'available' +# list at the bottom of this file. + +# Planck 2015 paper XII Table 4 final column (best fit) +Planck15 = dict( + Oc0=0.2589, + Ob0=0.04860, + Om0=0.3075, + H0=67.74, + n=0.9667, + sigma8=0.8159, + tau=0.066, + z_reion=8.8, + t0=13.799, + Tcmb0=2.7255, + Neff=3.046, + flat=True, + m_nu=[0., 0., 0.06], + reference=("Planck Collaboration 2016, A&A, 594, A13 (Paper XIII)," + " Table 4 (TT, TE, EE + lowP + lensing + ext)") +) + +# Planck 2013 paper XVI Table 5 penultimate column (best fit) +Planck13 = dict( + Oc0=0.25886, + Ob0=0.048252, + Om0=0.30712, + H0=67.77, + n=0.9611, + sigma8=0.8288, + tau=0.0952, + z_reion=11.52, + t0=13.7965, + Tcmb0=2.7255, + Neff=3.046, + flat=True, + m_nu=[0., 0., 0.06], + reference=("Planck Collaboration 2014, A&A, 571, A16 (Paper XVI)," + " Table 5 (Planck + WP + highL + BAO)") +) + + +WMAP9 = dict( + Oc0=0.2402, + Ob0=0.04628, + Om0=0.2865, + H0=69.32, + n=0.9608, + sigma8=0.820, + tau=0.081, + z_reion=10.1, + t0=13.772, + Tcmb0=2.725, + Neff=3.04, + m_nu=0.0, + flat=True, + reference=("Hinshaw et al. 2013, ApJS, 208, 19, " + "doi: 10.1088/0067-0049/208/2/19. " + "Table 4 (WMAP9 + eCMB + BAO + H0, last column)") +) + +WMAP7 = dict( + Oc0=0.226, + Ob0=0.0455, + Om0=0.272, + H0=70.4, + n=0.967, + sigma8=0.810, + tau=0.085, + z_reion=10.3, + t0=13.76, + Tcmb0=2.725, + Neff=3.04, + m_nu=0.0, + flat=True, + reference=("Komatsu et al. 2011, ApJS, 192, 18, " + "doi: 10.1088/0067-0049/192/2/18. " + "Table 1 (WMAP + BAO + H0 ML).") +) + +WMAP5 = dict( + Oc0=0.231, + Ob0=0.0459, + Om0=0.277, + H0=70.2, + n=0.962, + sigma8=0.817, + tau=0.088, + z_reion=11.3, + t0=13.72, + Tcmb0=2.725, + Neff=3.04, + m_nu=0.0, + flat=True, + reference=("Komatsu et al. 2009, ApJS, 180, 330, " + "doi: 10.1088/0067-0049/180/2/330. 
" + "Table 1 (WMAP + BAO + SN ML).") +) + +# If new parameters are added, this list must be updated +available = ['Planck15', 'Planck13', 'WMAP9', 'WMAP7', 'WMAP5'] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05b742e1b5a991022e7b07fb4e0ae3a61391db6b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/parameters.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/scalar_inv_efuncs.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/scalar_inv_efuncs.so new file mode 100755 index 0000000000000000000000000000000000000000..8ab4036821ee3767ba082210a3e0cb2c8978fbcd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/scalar_inv_efuncs.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..3cd9f7c3d928c2b9a57845c6438b77d8ca63de27 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..799e27a1afb6f2d291f14d472f327ff33f95b21c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7804fb234647661843615723acf3d2ad110809c0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.py new file mode 100644 index 0000000000000000000000000000000000000000..58bbbf5db5d277b8730d3ade58becedd7ded80d8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.py @@ -0,0 +1,1567 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from io import StringIO + +import pytest +import numpy as np + +from .. import core, funcs +from ...tests.helper import quantity_allclose as allclose +from ...utils.compat import NUMPY_LT_1_14 +from ... 
import units as u + +try: + import scipy # pylint: disable=W0611 +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +def test_init(): + """ Tests to make sure the code refuses inputs it is supposed to""" + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, + Tcmb0=u.Quantity([0.0, 2], u.K)) + with pytest.raises(ValueError): + h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc) + cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3 + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27) + cosmo.Ob(1) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27) + cosmo.Odm(1) + with pytest.raises(TypeError): + core.default_cosmology.validate(4) + + +def test_basic(): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04, + Ob0=0.05) + assert allclose(cosmo.Om0, 0.27) + assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4) + assert allclose(cosmo.Ob0, 0.05) + assert allclose(cosmo.Odm0, 0.27 - 0.05) + # This next test will fail if astropy.const starts returning non-mks + # units by default; see the comment at the top of core.py + assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4) + assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4) + assert allclose(cosmo.Ok0, 0.0) + assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0, + 1.0, rtol=1e-6) + assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) + + cosmo.Onu(1), 1.0, rtol=1e-6) + assert allclose(cosmo.Tcmb0, 2.0 * u.K) + assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5) + assert allclose(cosmo.Neff, 3.04) + assert allclose(cosmo.h, 0.7) + assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc) + + # Make sure setting them as quantities gives the same results + H0 = u.Quantity(70, u.km / (u.s * u.Mpc)) + T = u.Quantity(2.0, u.K) + cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05) + assert allclose(cosmo.Om0, 0.27) + assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4) + assert allclose(cosmo.Ob0, 0.05) + assert allclose(cosmo.Odm0, 0.27 - 0.05) + assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4) + assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4) + assert allclose(cosmo.Ok0, 0.0) + assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0, + 1.0, rtol=1e-6) + assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) + + cosmo.Onu(1), 1.0, rtol=1e-6) + assert allclose(cosmo.Tcmb0, 2.0 * u.K) + assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5) + assert allclose(cosmo.Neff, 3.04) + assert allclose(cosmo.h, 0.7) + assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc) + + 
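+# Most of the tests below are skipped when scipy is not available: the FLRW
+# distance, age and volume methods are evaluated by numerical integration
+# (scipy.integrate), unlike the purely algebraic density and temperature
+# quantities checked above.
+
+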
+@pytest.mark.skipif('not HAS_SCIPY') +def test_units(): + """ Test if the right units are being returned""" + + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0) + assert cosmo.comoving_distance(1.0).unit == u.Mpc + assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc + assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc + assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.luminosity_distance(1.0).unit == u.Mpc + assert cosmo.lookback_time(1.0).unit == u.Gyr + assert cosmo.lookback_distance(1.0).unit == u.Mpc + assert cosmo.H0.unit == u.km / u.Mpc / u.s + assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s + assert cosmo.Tcmb0.unit == u.K + assert cosmo.Tcmb(1.0).unit == u.K + assert cosmo.Tcmb([0.0, 1.0]).unit == u.K + assert cosmo.Tnu0.unit == u.K + assert cosmo.Tnu(1.0).unit == u.K + assert cosmo.Tnu([0.0, 1.0]).unit == u.K + assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc + assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc + assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin + assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin + assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3 + assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3 + assert cosmo.age(1.0).unit == u.Gyr + assert cosmo.distmod(1.0).unit == u.mag + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_distance_broadcast(): + """ Test array shape broadcasting for functions with single + redshift inputs""" + + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, + m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV)) + z = np.linspace(0.1, 1, 6) + z_reshape2d = z.reshape(2, 3) + z_reshape3d = z.reshape(3, 2, 1) + # Things with units + methods = ['comoving_distance', 'luminosity_distance', + 'comoving_transverse_distance', 'angular_diameter_distance', + 'distmod', 'lookback_time', 'age', 'comoving_volume', + 'differential_comoving_volume', 'kpc_comoving_per_arcmin'] + for method in methods: + g = getattr(cosmo, method) + value_flat = g(z) + assert value_flat.shape == z.shape + value_2d = g(z_reshape2d) + assert value_2d.shape == z_reshape2d.shape + value_3d = g(z_reshape3d) + assert value_3d.shape == z_reshape3d.shape + assert value_flat.unit == value_2d.unit + assert value_flat.unit == value_3d.unit + assert allclose(value_flat, value_2d.flatten()) + assert allclose(value_flat, value_3d.flatten()) + + # Also test unitless ones + methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H', + 'w', 'de_density_scale', 'Onu', 'Ogamma', + 'nu_relative_density'] + for method in methods: + g = getattr(cosmo, method) + value_flat = g(z) + assert value_flat.shape == z.shape + value_2d = g(z_reshape2d) + assert value_2d.shape == z_reshape2d.shape + value_3d = g(z_reshape3d) + assert value_3d.shape == z_reshape3d.shape + assert allclose(value_flat, value_2d.flatten()) + assert allclose(value_flat, value_3d.flatten()) + + # Test some dark energy models + methods = ['Om', 'Ode', 'w', 'de_density_scale'] + for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5), + core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2), + core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2), + core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5, + wp=-1.2, wa=-0.2, zp=0.9), + core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]: + for method in methods: + g = getattr(cosmo, method) + value_flat = g(z) + assert value_flat.shape == z.shape + 
value_2d = g(z_reshape2d) + assert value_2d.shape == z_reshape2d.shape + value_3d = g(z_reshape3d) + assert value_3d.shape == z_reshape3d.shape + assert allclose(value_flat, value_2d.flatten()) + assert allclose(value_flat, value_3d.flatten()) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_clone(): + """ Test clone operation""" + + cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27, + Tcmb0=3.0 * u.K) + z = np.linspace(0.1, 3, 15) + + # First, test with no changes, which should return same object + newclone = cosmo.clone() + assert newclone is cosmo + + # Now change H0 + # Note that H0 affects Ode0 because it changes Ogamma0 + newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc) + assert newclone is not cosmo + assert newclone.__class__ == cosmo.__class__ + assert newclone.name == cosmo.name + assert not allclose(newclone.H0.value, cosmo.H0.value) + assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc) + assert allclose(newclone.Om0, cosmo.Om0) + assert allclose(newclone.Ok0, cosmo.Ok0) + assert not allclose(newclone.Ogamma0, cosmo.Ogamma0) + assert not allclose(newclone.Onu0, cosmo.Onu0) + assert allclose(newclone.Tcmb0, cosmo.Tcmb0) + assert allclose(newclone.m_nu, cosmo.m_nu) + assert allclose(newclone.Neff, cosmo.Neff) + + # Compare modified version with directly instantiated one + cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27, + Tcmb0=3.0 * u.K) + assert newclone.__class__ == cmp.__class__ + assert newclone.name == cmp.name + assert allclose(newclone.H0, cmp.H0) + assert allclose(newclone.Om0, cmp.Om0) + assert allclose(newclone.Ode0, cmp.Ode0) + assert allclose(newclone.Ok0, cmp.Ok0) + assert allclose(newclone.Ogamma0, cmp.Ogamma0) + assert allclose(newclone.Onu0, cmp.Onu0) + assert allclose(newclone.Tcmb0, cmp.Tcmb0) + assert allclose(newclone.m_nu, cmp.m_nu) + assert allclose(newclone.Neff, cmp.Neff) + assert allclose(newclone.Om(z), cmp.Om(z)) + assert allclose(newclone.H(z), cmp.H(z)) + assert allclose(newclone.luminosity_distance(z), + cmp.luminosity_distance(z)) + + # Now try changing multiple things + newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc, + Tcmb0=2.8 * u.K) + assert newclone.__class__ == cosmo.__class__ + assert not newclone.name == cosmo.name + assert not allclose(newclone.H0.value, cosmo.H0.value) + assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc) + assert allclose(newclone.Om0, cosmo.Om0) + assert allclose(newclone.Ok0, cosmo.Ok0) + assert not allclose(newclone.Ogamma0, cosmo.Ogamma0) + assert not allclose(newclone.Onu0, cosmo.Onu0) + assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value) + assert allclose(newclone.Tcmb0, 2.8 * u.K) + assert allclose(newclone.m_nu, cosmo.m_nu) + assert allclose(newclone.Neff, cosmo.Neff) + + # And direct comparison + cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc, + Om0=0.27, Tcmb0=2.8 * u.K) + assert newclone.__class__ == cmp.__class__ + assert newclone.name == cmp.name + assert allclose(newclone.H0, cmp.H0) + assert allclose(newclone.Om0, cmp.Om0) + assert allclose(newclone.Ode0, cmp.Ode0) + assert allclose(newclone.Ok0, cmp.Ok0) + assert allclose(newclone.Ogamma0, cmp.Ogamma0) + assert allclose(newclone.Onu0, cmp.Onu0) + assert allclose(newclone.Tcmb0, cmp.Tcmb0) + assert allclose(newclone.m_nu, cmp.m_nu) + assert allclose(newclone.Neff, cmp.Neff) + assert allclose(newclone.Om(z), cmp.Om(z)) + assert allclose(newclone.H(z), cmp.H(z)) + assert allclose(newclone.luminosity_distance(z), + cmp.luminosity_distance(z)) + + # Try a dark energy 
class, make sure it can handle w params + cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc, + Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K) + newclone = cosmo.clone(w0=-1.1, wa=0.2) + assert newclone.__class__ == cosmo.__class__ + assert newclone.name == cosmo.name + assert allclose(newclone.H0, cosmo.H0) + assert allclose(newclone.Om0, cosmo.Om0) + assert allclose(newclone.Ode0, cosmo.Ode0) + assert allclose(newclone.Ok0, cosmo.Ok0) + assert not allclose(newclone.w0, cosmo.w0) + assert allclose(newclone.w0, -1.1) + assert not allclose(newclone.wa, cosmo.wa) + assert allclose(newclone.wa, 0.2) + + # Now test exception if user passes non-parameter + with pytest.raises(AttributeError): + newclone = cosmo.clone(not_an_arg=4) + + +def test_xtfuncs(): + """ Test of absorption and lookback integrand""" + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725) + z = np.array([2.0, 3.2]) + assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, + rtol=1e-4) + assert allclose(cosmo.lookback_time_integrand(z), + [0.10333179, 0.04644541], rtol=1e-4) + assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, + rtol=1e-4) + assert allclose(cosmo.abs_distance_integrand(z), + [2.7899584, 3.44104758], rtol=1e-4) + + +def test_repr(): + """ Test string representation of built in classes""" + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725) + expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, ' + 'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, ' + 'Ob0=None)').format(' 0. 0. 0.' if NUMPY_LT_1_14 else + '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV)) + expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, ' + 'Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, ' + 'Ob0=None)').format(' 0.01 0.01 0.01' if NUMPY_LT_1_14 else + '0.01 0.01 0.01') + assert str(cosmo) == expected + + cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05) + expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, ' + 'Tcmb0=3 K, Neff=3.04, m_nu=[{}] eV, Ob0=0.05)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1') + expected = ('wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, ' + 'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2') + expected = ('FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, ' + 'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)') + assert str(cosmo) == expected + + cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3') + expected = ('w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, ' + 'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 
0.') + assert str(cosmo) == expected + + cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4', + Ob0=0.0456789) + expected = ('Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, ' + 'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, ' + 'Ob0=0.0457)') + assert str(cosmo) == expected + + cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, + zp=0.3, name='test5') + expected = ('wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, ' + 'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, ' + 'Neff=3.04, m_nu=None, Ob0=None)') + assert str(cosmo) == expected + + cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725, + m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV)) + expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, ' + 'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0.001 0.01 0.015' if NUMPY_LT_1_14 else + '0.001 0.01 0.015') + assert str(cosmo) == expected + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_flat_z1(): + """ Test a flat cosmology at z=1 against several other on-line + calculators. + """ + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) + z = 1 + + # Test values were taken from the following web cosmology + # calculators on 27th Feb 2012: + + # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html + # (http://adsabs.harvard.edu/abs/2006PASP..118.1711W) + # Kempner: http://www.kempner.net/cosmic.php + # iCosmos: http://www.icosmos.co.uk/index.html + + # The order of values below is Wright, Kempner, iCosmos' + assert allclose(cosmo.comoving_distance(z), + [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.angular_diameter_distance(z), + [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.luminosity_distance(z), + [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.lookback_time(z), + [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3) + assert allclose(cosmo.lookback_distance(z), + [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3) + + +def test_zeroing(): + """ Tests if setting params to 0s always respects that""" + # Make sure Ode = 0 behaves that way + cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0) + assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0]) + assert allclose(cosmo.Ode(1), 0) + # Ogamma0 and Onu + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) + assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0]) + assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0]) + assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0]) + assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0]) + # Obaryon + cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0) + assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0]) + + +# This class is to test whether the routines work correctly +# if one only overloads w(z) +class test_cos_sub(core.FLRW): + def __init__(self): + core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0, + name="test_cos") + self._w0 = -0.9 + + def w(self, z): + return self._w0 * np.ones_like(z) + +# Similar, but with neutrinos + + +class test_cos_subnu(core.FLRW): + def __init__(self): + core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0, + m_nu=0.1 * u.eV, name="test_cos_nu") + self._w0 = -0.8 + + def w(self, z): + return self._w0 * np.ones_like(z) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_de_subclass(): + # This is the comparison object + z = [0.2, 0.4, 0.6, 0.9] + cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0) + # Values taken from Ned Wrights advanced cosmo calculator, Aug 17 2012 + assert 
allclose(cosmo.luminosity_distance(z), + [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3) + # Now try the subclass that only gives w(z) + cosmo = test_cos_sub() + assert allclose(cosmo.luminosity_distance(z), + [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3) + # Test efunc + assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5) + assert allclose(cosmo.efunc([0.5, 1.0]), + [1.31744953, 1.7489240754], rtol=1e-5) + assert allclose(cosmo.inv_efunc([0.5, 1.0]), + [0.75904236, 0.57178011], rtol=1e-5) + # Test de_density_scale + assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4) + assert allclose(cosmo.de_density_scale([0.5, 1.0]), + [1.12934694, 1.23114444], rtol=1e-4) + + # Add neutrinos for efunc, inv_efunc + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_varyde_lumdist_mathematica(): + """Tests a few varying dark energy EOS models against a mathematica + computation""" + + # w0wa models + z = np.array([0.2, 0.4, 0.9, 1.2]) + cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0) + assert allclose(cosmo.w0, -1.1) + assert allclose(cosmo.wa, 0.2) + + assert allclose(cosmo.luminosity_distance(z), + [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5) + assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]), + [1.0, 0.9246310669529021, 0.9184087000251957]) + + cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0) + assert allclose(cosmo.luminosity_distance(z), + [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4) + cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, + Tcmb0=0.0) + assert allclose(cosmo.luminosity_distance(z), + [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4) + + # wpwa models + cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, + Tcmb0=0.0) + assert allclose(cosmo.wp, -1.1) + assert allclose(cosmo.wa, 0.2) + assert allclose(cosmo.zp, 0.5) + assert allclose(cosmo.luminosity_distance(z), + [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4) + + cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, + Tcmb0=0.0) + assert allclose(cosmo.wp, -1.1) + assert allclose(cosmo.wa, 0.2) + assert allclose(cosmo.zp, 0.9) + assert allclose(cosmo.luminosity_distance(z), + [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_matter(): + # Test non-relativistic matter evolution + tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045) + assert allclose(tcos.Om0, 0.3) + assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc) + assert allclose(tcos.Om(0), 0.3) + assert allclose(tcos.Ob(0), 0.045) + z = np.array([0.0, 0.5, 1.0, 2.0]) + assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455], + rtol=1e-4) + assert allclose(tcos.Ob(z), + [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4) + assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636], + rtol=1e-4) + # Consistency of dark and baryonic matter evolution with all + # non-relativistic matter + assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_ocurv(): + # Test Ok evolution + # Flat, boring case + tcos = core.FlatLambdaCDM(70.0, 0.3) + assert allclose(tcos.Ok0, 0.0) + assert allclose(tcos.Ok(0), 0.0) + z = np.array([0.0, 0.5, 1.0, 2.0]) + assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0], + rtol=1e-6) + + # Not flat + tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K)) + assert allclose(tcos.Ok0, 
0.2) + assert allclose(tcos.Ok(0), 0.2) + assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692], + rtol=1e-4) + + # Test the sum; note that Ogamma/Onu are 0 + assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z), + [1.0, 1.0, 1.0, 1.0], rtol=1e-5) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_ode(): + # Test Ode evolution, turn off neutrinos, cmb + tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0) + assert allclose(tcos.Ode0, 0.7) + assert allclose(tcos.Ode(0), 0.7) + z = np.array([0.0, 0.5, 1.0, 2.0]) + assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545], + rtol=1e-5) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_ogamma(): + """Tests the effects of changing the temperature of the CMB""" + + # Tested against Ned Wright's advanced cosmology calculator, + # Sep 7 2012. The accuracy of our comparision is limited by + # how many digits it outputs, which limits our test to about + # 0.2% accuracy. The NWACC does not allow one + # to change the number of nuetrino species, fixing that at 3. + # Also, inspection of the NWACC code shows it uses inaccurate + # constants at the 0.2% level (specifically, a_B), + # so we shouldn't expect to match it that well. The integral is + # also done rather crudely. Therefore, we should not expect + # the NWACC to be accurate to better than about 0.5%, which is + # unfortunate, but reflects a problem with it rather than this code. + # More accurate tests below using Mathematica + z = np.array([1.0, 10.0, 500.0, 1000.0]) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4) + + # Next compare with doing the integral numerically in Mathematica, + # which allows more precision in the test. It is at least as + # good as 0.01%, possibly better + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5) + + # Just to be really sure, we also do a version where the integral + # is analytic, which is a Ode = 0 flat universe. In this case + # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1) + # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance. 
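+    # (The closed form follows because with Ode = 0 and Om + Or = 1,
+    # E(z)^2 = Om (1+z)^3 + Or (1+z)^4 = (1+z)^3 (1 + Or z), which makes
+    # the integral of 1/E elementary.)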
+ Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26 + Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04 + Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2 + Om0 = 1.0 - Or0 + hubdis = (299792.458 / 70.0) * u.Mpc + cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04) + targvals = 2.0 * hubdis * \ + (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) + assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) + + # And integers for z + assert allclose(cosmo.comoving_distance(z.astype(np.int)), + targvals, rtol=1e-5) + + # Try Tcmb0 = 4 + Or0 *= (4.0 / 2.725) ** 4 + Om0 = 1.0 - Or0 + cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04) + targvals = 2.0 * hubdis * \ + (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) + assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_tcmb(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5) + assert allclose(cosmo.Tcmb0, 2.5 * u.K) + assert allclose(cosmo.Tcmb(2), 7.5 * u.K) + z = [0.0, 1.0, 2.0, 3.0, 9.0] + assert allclose(cosmo.Tcmb(z), + [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) + # Make sure it's the same for integers + z = [0, 1, 2, 3, 9] + assert allclose(cosmo.Tcmb(z), + [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_tnu(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) + assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6) + assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6) + z = [0.0, 1.0, 2.0, 3.0] + expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K + assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) + + # Test for integers + z = [0, 1, 2, 3] + assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) + + +def test_efunc_vs_invefunc(): + """ Test that efunc and inv_efunc give inverse values""" + + # Note that all of the subclasses here don't need + # scipy because they don't need to call de_density_scale + # The test following this tests the case where that is needed. 
+ + z0 = 0.5 + z = np.array([0.5, 1.0, 2.0, 5.0]) + + # Below are the 'standard' included cosmologies + # We do the non-standard case in test_efunc_vs_invefunc_flrw, + # since it requires scipy + cosmo = core.LambdaCDM(70, 0.3, 0.5) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV)) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.FlatLambdaCDM(50.0, 0.27) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_efunc_vs_invefunc_flrw(): + """ Test that efunc and inv_efunc give inverse values""" + z0 = 0.5 + z = np.array([0.5, 1.0, 2.0, 5.0]) + + # FLRW is abstract, so requires test_cos_sub defined earlier + # This requires scipy, unlike the built-ins, because it + # calls de_density_scale, which has an integral in it + cosmo = test_cos_sub() + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + # Add neutrinos + cosmo = test_cos_subnu() + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_kpc_methods(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(cosmo.arcsec_per_kpc_comoving(3), + 0.0317179167 * u.arcsec / u.kpc) + assert allclose(cosmo.arcsec_per_kpc_proper(3), + 0.1268716668 * u.arcsec / u.kpc) + assert allclose(cosmo.kpc_comoving_per_arcmin(3), + 1891.6753126 * u.kpc / u.arcmin) + assert allclose(cosmo.kpc_proper_per_arcmin(3), + 472.918828 * u.kpc / u.arcmin) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_comoving_volume(): + + c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) + c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) + c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) + + # test against ned wright's calculator (cubic Gpc) + redshifts = np.array([0.5, 1, 2, 3, 5, 9]) + wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, + 3654.802]) * u.Gpc**3 + wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, + 3123.814]) * u.Gpc**3 + wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, + 358.992]) * 
u.Gpc**3 + # The wright calculator isn't very accurate, so we use a rather + # modest precision + assert allclose(c_flat.comoving_volume(redshifts), wright_flat, + rtol=1e-2) + assert allclose(c_open.comoving_volume(redshifts), + wright_open, rtol=1e-2) + assert allclose(c_closed.comoving_volume(redshifts), + wright_closed, rtol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_differential_comoving_volume(): + from scipy.integrate import quad + + c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) + c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) + c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) + + # test that integration of differential_comoving_volume() + # yields same as comoving_volume() + redshifts = np.array([0.5, 1, 2, 3, 5, 9]) + wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, + 3654.802]) * u.Gpc**3 + wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, + 3123.814]) * u.Gpc**3 + wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, + 358.992]) * u.Gpc**3 + # The wright calculator isn't very accurate, so we use a rather + # modest precision. + ftemp = lambda x: c_flat.differential_comoving_volume(x).value + otemp = lambda x: c_open.differential_comoving_volume(x).value + ctemp = lambda x: c_closed.differential_comoving_volume(x).value + # Multiply by solid_angle (4 * pi) + assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_flat, rtol=1e-2) + assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_open, rtol=1e-2) + assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_closed, rtol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_flat_open_closed_icosmo(): + """ Test against the tabulated values generated from icosmo.org + with three example cosmologies (flat, open and closed). 
+ """ + + cosmo_flat = """\ +# from icosmo (icosmo.org) +# Om 0.3 w -1 h 0.7 Ol 0.7 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 669.77536 576.15085 778.61386 + 0.32500000 1285.5964 970.26143 1703.4152 + 0.50000000 1888.6254 1259.0836 2832.9381 + 0.66250000 2395.5489 1440.9317 3982.6000 + 0.82500000 2855.5732 1564.6976 5211.4210 + 1.0000000 3303.8288 1651.9144 6607.6577 + 1.1625000 3681.1867 1702.2829 7960.5663 + 1.3250000 4025.5229 1731.4077 9359.3408 + 1.5000000 4363.8558 1745.5423 10909.640 + 1.6625000 4651.4830 1747.0359 12384.573 + 1.8250000 4916.5970 1740.3883 13889.387 + 2.0000000 5179.8621 1726.6207 15539.586 + 2.1625000 5406.0204 1709.4136 17096.540 + 2.3250000 5616.5075 1689.1752 18674.888 + 2.5000000 5827.5418 1665.0120 20396.396 + 2.6625000 6010.4886 1641.0890 22013.414 + 2.8250000 6182.1688 1616.2533 23646.796 + 3.0000000 6355.6855 1588.9214 25422.742 + 3.1625000 6507.2491 1563.3031 27086.425 + 3.3250000 6650.4520 1537.6768 28763.205 + 3.5000000 6796.1499 1510.2555 30582.674 + 3.6625000 6924.2096 1485.0852 32284.127 + 3.8250000 7045.8876 1460.2876 33996.408 + 4.0000000 7170.3664 1434.0733 35851.832 + 4.1625000 7280.3423 1410.2358 37584.767 + 4.3250000 7385.3277 1386.9160 39326.870 + 4.5000000 7493.2222 1362.4040 41212.722 + 4.6625000 7588.9589 1340.2135 42972.480 +""" + + cosmo_open = """\ +# from icosmo (icosmo.org) +# Om 0.3 w -1 h 0.7 Ol 0.1 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 643.08185 553.18868 747.58265 + 0.32500000 1200.9858 906.40441 1591.3062 + 0.50000000 1731.6262 1154.4175 2597.4393 + 0.66250000 2174.3252 1307.8648 3614.8157 + 0.82500000 2578.7616 1413.0201 4706.2399 + 1.0000000 2979.3460 1489.6730 5958.6920 + 1.1625000 3324.2002 1537.2024 7188.5829 + 1.3250000 3646.8432 1568.5347 8478.9104 + 1.5000000 3972.8407 1589.1363 9932.1017 + 1.6625000 4258.1131 1599.2913 11337.226 + 1.8250000 4528.5346 1603.0211 12793.110 + 2.0000000 4804.9314 1601.6438 14414.794 + 2.1625000 5049.2007 1596.5852 15968.097 + 2.3250000 5282.6693 1588.7727 17564.875 + 2.5000000 5523.0914 1578.0261 19330.820 + 2.6625000 5736.9813 1566.4113 21011.694 + 2.8250000 5942.5803 1553.6158 22730.370 + 3.0000000 6155.4289 1538.8572 24621.716 + 3.1625000 6345.6997 1524.4924 26413.975 + 3.3250000 6529.3655 1509.6799 28239.506 + 3.5000000 6720.2676 1493.3928 30241.204 + 3.6625000 6891.5474 1478.0799 32131.840 + 3.8250000 7057.4213 1462.6780 34052.058 + 4.0000000 7230.3723 1446.0745 36151.862 + 4.1625000 7385.9998 1430.7021 38130.224 + 4.3250000 7537.1112 1415.4199 40135.117 + 4.5000000 7695.0718 1399.1040 42322.895 + 4.6625000 7837.5510 1384.1150 44380.133 +""" + + cosmo_closed = """\ +# from icosmo (icosmo.org) +# Om 2 w -1 h 0.7 Ol 0.1 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 601.80160 517.67879 699.59436 + 0.32500000 1057.9502 798.45297 1401.7840 + 0.50000000 1438.2161 958.81076 2157.3242 + 0.66250000 1718.6778 1033.7912 2857.3019 + 0.82500000 1948.2400 1067.5288 3555.5381 + 1.0000000 2152.7954 1076.3977 4305.5908 + 1.1625000 2312.3427 1069.2914 5000.4410 + 1.3250000 2448.9755 1053.3228 5693.8681 + 1.5000000 2575.6795 1030.2718 6439.1988 + 1.6625000 2677.9671 1005.8092 7130.0873 + 1.8250000 2768.1157 979.86398 7819.9270 + 2.0000000 2853.9222 951.30739 8561.7665 + 2.1625000 2924.8116 924.84161 9249.7167 + 2.3250000 2988.5333 898.80701 9936.8732 + 2.5000000 3050.3065 
871.51614 10676.073 + 2.6625000 3102.1909 847.01459 11361.774 + 2.8250000 3149.5043 823.39982 12046.854 + 3.0000000 3195.9966 798.99915 12783.986 + 3.1625000 3235.5334 777.30533 13467.908 + 3.3250000 3271.9832 756.52790 14151.327 + 3.5000000 3308.1758 735.15017 14886.791 + 3.6625000 3339.2521 716.19347 15569.263 + 3.8250000 3368.1489 698.06195 16251.319 + 4.0000000 3397.0803 679.41605 16985.401 + 4.1625000 3422.1142 662.87926 17666.664 + 4.3250000 3445.5542 647.05243 18347.576 + 4.5000000 3469.1805 630.76008 19080.493 + 4.6625000 3489.7534 616.29199 19760.729 +""" + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_integral(): + # Test integer vs. floating point inputs + cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50) + assert allclose(cosmo.comoving_distance(3), + cosmo.comoving_distance(3.0), rtol=1e-7) + assert allclose(cosmo.comoving_distance([1, 2, 3, 5]), + cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]), + rtol=1e-7) + assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7) + assert allclose(cosmo.efunc([1, 2, 6]), + cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7) + assert allclose(cosmo.inv_efunc([1, 2, 6]), + cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7) + + +def test_wz(): + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70) + assert allclose(cosmo.w(1.0), -1.) 
+ assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), + [-1., -1, -1, -1, -1, -1]) + + cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5) + assert allclose(cosmo.w(1.0), -0.5) + assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), + [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5]) + assert allclose(cosmo.w0, -0.5) + + cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5) + assert allclose(cosmo.w(1.0), -0.5) + assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), + [-1.0, -0.75, -0.5, -0.25, 0.15]) + assert allclose(cosmo.w0, -1.0) + assert allclose(cosmo.wz, 0.5) + + cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5) + assert allclose(cosmo.w0, -1.0) + assert allclose(cosmo.wa, -0.5) + assert allclose(cosmo.w(1.0), -1.25) + assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), + [-1, -1.16666667, -1.25, -1.3, -1.34848485]) + + cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, + wa=0.2, zp=0.5) + assert allclose(cosmo.wp, -0.9) + assert allclose(cosmo.wa, 0.2) + assert allclose(cosmo.zp, 0.5) + assert allclose(cosmo.w(0.5), -0.9) + assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), + [-0.94848485, -0.93333333, -0.9, -0.84666667, + -0.82380952, -0.78266667]) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_de_densityscale(): + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70) + z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) + assert allclose(cosmo.de_density_scale(z), + [1.0, 1.0, 1.0, 1.0, 1.0]) + # Integer check + assert allclose(cosmo.de_density_scale(3), + cosmo.de_density_scale(3.0), rtol=1e-7) + assert allclose(cosmo.de_density_scale([1, 2, 3]), + cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) + + cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5) + assert allclose(cosmo.de_density_scale(z), + [1.15369, 1.31453, 1.83712, 3.95285, 6.5479], + rtol=1e-4) + assert allclose(cosmo.de_density_scale(3), + cosmo.de_density_scale(3.0), rtol=1e-7) + assert allclose(cosmo.de_density_scale([1, 2, 3]), + cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) + + cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5) + assert allclose(cosmo.de_density_scale(z), + [0.746048, 0.5635595, 0.25712378, 0.026664129, + 0.0035916468], rtol=1e-4) + assert allclose(cosmo.de_density_scale(3), + cosmo.de_density_scale(3.0), rtol=1e-7) + assert allclose(cosmo.de_density_scale([1, 2, 3]), + cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) + + cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5) + assert allclose(cosmo.de_density_scale(z), + [0.9934201, 0.9767912, 0.897450, + 0.622236, 0.4458753], rtol=1e-4) + assert allclose(cosmo.de_density_scale(3), + cosmo.de_density_scale(3.0), rtol=1e-7) + assert allclose(cosmo.de_density_scale([1, 2, 3]), + cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) + + cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, + wa=0.2, zp=0.5) + assert allclose(cosmo.de_density_scale(z), + [1.012246048, 1.0280102, 1.087439, + 1.324988, 1.565746], rtol=1e-4) + assert allclose(cosmo.de_density_scale(3), + cosmo.de_density_scale(3.0), rtol=1e-7) + assert allclose(cosmo.de_density_scale([1, 2, 3]), + cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_age(): + # WMAP7 but with Omega_relativisitic = 0 + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr) + assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr) + assert allclose(tcos.age([1., 5.]), + [5.97113193, 1.20553129] * u.Gyr) + assert allclose(tcos.age([1, 5]), [5.97113193, 
1.20553129] * u.Gyr) + + # Add relativistic species + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) + assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr) + assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr) + + # And massive neutrinos + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0, + m_nu=0.1 * u.eV) + assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr) + assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_distmod(): + # WMAP7 but with Omega_relativisitic = 0 + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc) + assert allclose(tcos.distmod([1, 5]), + [44.124857, 48.40167258] * u.mag) + assert allclose(tcos.distmod([1., 5.]), + [44.124857, 48.40167258] * u.mag) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_neg_distmod(): + # Cosmology with negative luminosity distances (perfectly okay, + # if obscure) + tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0) + assert allclose(tcos.luminosity_distance([50, 100]), + [16612.44047622, -46890.79092244] * u.Mpc) + assert allclose(tcos.distmod([50, 100]), + [46.102167189, 48.355437790944] * u.mag) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_critical_density(): + # WMAP7 but with Omega_relativistic = 0 + # These tests will fail if astropy.const starts returning non-mks + # units by default; see the comment at the top of core.py + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(tcos.critical_density0, + 9.309668456020899e-30 * u.g / u.cm**3) + assert allclose(tcos.critical_density0, + tcos.critical_density(0)) + assert allclose(tcos.critical_density([1, 5]), + [2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3) + assert allclose(tcos.critical_density([1., 5.]), + [2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_comoving_distance_z1z2(): + tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0) + with pytest.raises(ValueError): # test diff size z1, z2 fail + tcos._comoving_distance_z1z2((1, 2), (3, 4, 5)) + # Comoving distances are invertible + assert allclose(tcos._comoving_distance_z1z2(1, 2), + -tcos._comoving_distance_z1z2(2, 1)) + + z1 = 0, 0, 2, 0.5, 1 + z2 = 2, 1, 1, 2.5, 1.1 + results = (3767.90579253, + 2386.25591391, + -1381.64987862, + 2893.11776663, + 174.1524683) * u.Mpc + + assert allclose(tcos._comoving_distance_z1z2(z1, z2), + results) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_comoving_transverse_distance_z1z2(): + tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0) + with pytest.raises(ValueError): # test diff size z1, z2 fail + tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5)) + # Tests that should actually work, target values computed with + # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML + # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686) + assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2), + 1313.2232194828466 * u.Mpc) + + # In a flat universe comoving distance and comoving transverse + # distance are identical + z1 = 0, 0, 2, 0.5, 1 + z2 = 2, 1, 1, 2.5, 1.1 + + assert allclose(tcos._comoving_distance_z1z2(z1, z2), + tcos._comoving_transverse_distance_z1z2(z1, z2)) + + # Test non-flat cases to avoid simply testing + # comoving_distance_z1z2. Test array, array case. 
+ tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0) + results = (3535.931375645655, + 2226.430046551708, + -1208.6817970036532, + 2595.567367601969, + 151.36592003406884) * u.Mpc + + assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), + results) + + # Test positive curvature with scalar, array combination. + tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0) + z1 = 0.1 + z2 = 0, 0.1, 0.2, 0.5, 1.1, 2 + results = (-281.31602666724865, + 0., + 248.58093707820436, + 843.9331377460543, + 1618.6104987686672, + 2287.5626543279927) * u.Mpc + + assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), + results) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_angular_diameter_distance_z1z2(): + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + with pytest.raises(ValueError): # test diff size z1, z2 fail + tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5]) + # Tests that should actually work + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 646.22968662822018 * u.Mpc) + + z1 = 0, 0, 2, 0.5, 1 + z2 = 2, 1, 1, 2.5, 1.1 + results = (1760.0628637762106, + 1670.7497657219858, + -969.34452994, + 1159.0970895962193, + 115.72768186186921) * u.Mpc + + assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), + results) + + z1 = 0.1 + z2 = 0.1, 0.2, 0.5, 1.1, 2 + results = (0., + 332.09893173, + 986.35635069, + 1508.37010062, + 1621.07937976) * u.Mpc + assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2), + results) + + # Non-flat (positive Ok0) test + tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0) + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 620.1175337852428 * u.Mpc) + # Non-flat (negative Ok0) test + tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0) + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 228.42914659246014 * u.Mpc) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_absorption_distance(): + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(tcos.absorption_distance([1, 3]), + [1.72576635, 7.98685853]) + assert allclose(tcos.absorption_distance([1., 3.]), + [1.72576635, 7.98685853]) + assert allclose(tcos.absorption_distance(3), 7.98685853) + assert allclose(tcos.absorption_distance(3.), 7.98685853) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_massivenu_basic(): + # Test no neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05, + Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV)) + assert allclose(tcos.Neff, 4.05) + assert not tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 4 + assert mnu.unit == u.eV + assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV) + assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05, + rtol=1e-6) + assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05, + rtol=1e-6) + + # Alternative no neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K, + m_nu=u.Quantity(0.4, u.eV)) + assert not tcos.has_massive_nu + assert tcos.m_nu is None + + # Test basic setting, retrieval of values + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K, + m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV)) + assert tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 3 + assert mnu.unit == u.eV + assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV) + + # All massive neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725, + m_nu=u.Quantity(0.1, u.eV), Neff=3.1) + assert allclose(tcos.Neff, 3.1) + assert tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 3 + assert mnu.unit == u.eV + assert allclose(mnu, 
[0.1, 0.1, 0.1] * u.eV) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_distances(): + # Test distance calculations for various special case + # scenarios (no relativistic species, normal, massive neutrinos) + # These do not come from external codes -- they are just internal + # checks to make sure nothing changes if we muck with the distance + # calculators + + z = np.array([1.0, 2.0, 3.0, 4.0]) + + # The pattern here is: no relativistic species, the relativistic + # species with massless neutrinos, then massive neutrinos + cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2953.93001902, 4616.7134253, 5685.07765971, + 6440.80611897] * u.Mpc, rtol=1e-4) + cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3037.12620424, 4776.86236327, 5889.55164479, + 6671.85418235] * u.Mpc, rtol=1e-4) + cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2471.80626824, 3567.1902565, 4207.15995626, + 4638.20476018] * u.Mpc, rtol=1e-4) + # Flat + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3180.83488552, 5060.82054204, 6253.6721173, + 7083.5374303] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3180.42662867, 5059.60529655, 6251.62766102, + 7080.71698117] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.54183142, 3371.91131264, 3988.40711188, + 4409.09346922] * u.Mpc, rtol=1e-4) + # Add w + cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3216.8296894, 5117.2097601, 6317.05995437, + 7149.68648536] * u.Mpc, rtol=1e-4) + cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3143.56537758, 5000.32196494, 6184.11444601, + 7009.80166062] * u.Mpc, rtol=1e-4) + cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.76035371, 3372.1971387, 3988.71362289, + 4409.40817174] * u.Mpc, rtol=1e-4) + # Non-flat w + cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2849.6163356, 4428.71661565, 5450.97862778, + 6179.37072324] * u.Mpc, rtol=1e-4) + cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2904.35580229, 4511.11471267, 5543.43643353, + 6275.9206788] * u.Mpc, rtol=1e-4) + cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2473.32522734, 3581.54519631, 4232.41674426, + 4671.83818117] * u.Mpc, rtol=1e-4) + # w0wa + cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2937.7807638, 4572.59950903, 5611.52821924, + 6339.8549956] * u.Mpc, rtol=1e-4) + cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2907.34722624, 4539.01723198, 5593.51611281, + 6342.3228444] * u.Mpc, rtol=1e-4) + cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, + 
m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2507.18336722, 3633.33231695, 4292.44746919, + 4736.35404638] * u.Mpc, rtol=1e-4) + # Flatw0wa + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3123.29892781, 4956.15204302, 6128.15563818, + 6948.26480378] * u.Mpc, rtol=1e-4) + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3122.92671907, 4955.03768936, 6126.25719576, + 6945.61856513] * u.Mpc, rtol=1e-4) + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.70072701, 3372.13719963, 3988.6571093, + 4409.35399673] * u.Mpc, rtol=1e-4) + # wpwa + cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2954.68975298, 4599.83254834, 5643.04013201, + 6373.36147627] * u.Mpc, rtol=1e-4) + cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1, + Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2919.00656215, 4558.0218123, 5615.73412391, + 6366.10224229] * u.Mpc, rtol=1e-4) + cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0, + Neff=4, m_nu=u.Quantity(5.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2629.48489827, 3874.13392319, 4614.31562397, + 5116.51184842] * u.Mpc, rtol=1e-4) + + # w0wz + cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3051.68786716, 4756.17714818, 5822.38084257, + 6562.70873734] * u.Mpc, rtol=1e-4) + cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, + Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2997.8115653, 4686.45599916, 5764.54388557, + 6524.17408738] * u.Mpc, rtol=1e-4) + cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0, + Neff=4, m_nu=u.Quantity(5.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2676.73467639, 3940.57967585, 4686.90810278, + 5191.54178243] * u.Mpc, rtol=1e-4) + + # Also test different numbers of massive neutrinos + # for FlatLambdaCDM to give the scalar nu density functions a + # work out + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([10.0, 0, 0], u.eV)) + assert allclose(cos.comoving_distance(z), + [2777.71589173, 4186.91111666, 5046.0300719, + 5636.10397302] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([10.0, 5, 0], u.eV)) + assert allclose(cos.comoving_distance(z), + [2636.48149391, 3913.14102091, 4684.59108974, + 5213.07557084] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([4.0, 5, 9], u.eV)) + assert allclose(cos.comoving_distance(z), + [2563.5093049, 3776.63362071, 4506.83448243, + 5006.50158829] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2, + m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV)) + assert allclose(cos.comoving_distance(z), + [2525.58017482, 3706.87633298, 4416.58398847, + 4901.96669755] * u.Mpc, rtol=1e-4) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_massivenu_density(): + # Testing neutrino density calculation + + # Simple test cosmology, where we compare rho_nu and rho_gamma + # against the exact formula (eq 24/25 of Komatsu et al. 2011) + # computed using Mathematica. 
The approximation we use for f(y) + # is only good to ~ 0.5% (with some redshift dependence), so that's + # what we test to. + ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0]) + nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) + # First try 3 massive neutrinos, all 100 eV -- note this is a universe + # seriously dominated by neutrinos! + tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(100.0, u.eV)) + assert tcos.has_massive_nu + assert tcos.Neff == 3 + nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, + 15633.5, 171.801]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) + assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3) + + # Next, slightly less massive + tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.25, u.eV)) + nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, + 39.1005, 1.11086]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + + # For this one also test Onu directly + onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, + 0.06999286, 0.1344951]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + # And fairly light + tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.01, u.eV)) + + nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, + 1.90671, 1.00021]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, + 0.00268404, 0.0978313]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], + rtol=1e-4) + assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], + rtol=1e-4) + + # Now a mixture of neutrino masses, with non-integer Neff + tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04, + m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV)) + nurel_exp = nuprefac * tcos.Neff * \ + np.array([149.386233, 74.87915, 50.0518, + 14.002403, 1.03702333]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, + 0.01963451, 0.10227728]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + # Integer redshifts + ztest = ztest.astype(np.int) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_z_at_value(): + # These are tests of expected values, and hence have less precision + # than the roundtrip tests below (test_z_at_value_roundtrip); + # here we have to worry about the cosmological calculations + # giving slightly different values on different architectures, + # there we are checking internal consistency on the same architecture + # and so can be more demanding + z_at_value = funcs.z_at_value + cosmo = core.Planck13 + d = cosmo.luminosity_distance(3) + assert allclose(z_at_value(cosmo.luminosity_distance, d), 3, + rtol=1e-8) + assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356, + rtol=1e-6) + assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), + 1.3685790653802761, rtol=1e-6) + assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), + 0.7951983674601507, rtol=1e-6) + assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, + zmax=2), 0.68127769625288614, rtol=1e-6) + assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, + zmin=2.5), 
3.7914908028272083, rtol=1e-6) + assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), + 1.9913891680278133, rtol=1e-6) + + # test behaviour when the solution is outside z limits (should + # raise a CosmologyError) + with pytest.raises(core.CosmologyError): + z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5) + with pytest.raises(core.CosmologyError): + z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_z_at_value_roundtrip(): + """ + Calculate values from a known redshift, and then check that + z_at_value returns the right answer. + """ + z = 0.5 + + # Skip Ok, w, de_density_scale because in the Planck13 cosmology + # they are redshift independent and hence uninvertable, + # *_distance_z1z2 methods take multiple arguments, so require + # special handling + # clone isn't a redshift-dependent method + skip = ('Ok', + 'angular_diameter_distance_z1z2', + 'clone', + 'de_density_scale', 'w') + + import inspect + methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod) + + for name, func in methods: + if name.startswith('_') or name in skip: + continue + print('Round-trip testing {0}'.format(name)) + fval = func(z) + # we need zmax here to pick the right solution for + # angular_diameter_distance and related methods. + # Be slightly more generous with rtol than the default 1e-8 + # used in z_at_value + assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5), + rtol=2e-8) + + # Test distance functions between two redshifts + z2 = 2.0 + func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2), + lambda z1: + core.Planck13._comoving_transverse_distance_z1z2(z1, z2), + lambda z1: + core.Planck13.angular_diameter_distance_z1z2(z1, z2)] + for func in func_z1z2: + fval = func(z) + assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5), + rtol=2e-8) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8435012f8f0d59a3558eb944a0ccaa9847611a0a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_cosmology.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..d553179f8494add6540681823b3acb643389db83 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.py @@ -0,0 +1,19 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function, unicode_literals + +import pytest + +from ...tests.helper import pickle_protocol, check_pickling_recovery +from ...extern.six.moves import zip +from ... 
import cosmology as cosm + +originals = [cosm.FLRW] +xfails = [False] + + +@pytest.mark.parametrize(("original", "xfail"), + zip(originals, xfails)) +def test_flrw(pickle_protocol, original, xfail): + if xfail: + pytest.xfail() + check_pickling_recovery(original, pickle_protocol) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5548cc1191e9a785b0260232c0d62dc230ef8457 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cosmology/tests/test_pickle.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.py new file mode 100644 index 0000000000000000000000000000000000000000..878671d1f89a402c149173c58fa09c36e728ba68 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.py @@ -0,0 +1,2 @@ +# Generated file; do not modify +cython_version = '0.28.5' diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69166c899ed75436a3021311d973e9d6b802c0d8 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/cython_version.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..29db62b9ced08059e8afcb07a17f03b74e35ece1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This subpackage contains modules and packages for interpreting data storage +formats used by and in astropy. +""" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc0bc79cd7b4d74fdead46eae319dba30963e483 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..64b784a457fb7b06f4c8e38faa5bca452490d56e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.py @@ -0,0 +1,46 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" An extensible ASCII table reader and writer. 
+ +""" + +from __future__ import absolute_import, division, print_function + +from .core import (InconsistentTableError, + ParameterError, + NoType, StrType, NumType, FloatType, IntType, AllType, + Column, + BaseInputter, ContinuationLinesInputter, + BaseHeader, + BaseData, + BaseOutputter, TableOutputter, + BaseReader, + BaseSplitter, DefaultSplitter, WhitespaceSplitter, + convert_numpy, + masked + ) +from .basic import (Basic, BasicHeader, BasicData, + Rdb, + Csv, + Tab, + NoHeader, + CommentedHeader) +from .fastbasic import (FastBasic, + FastCsv, + FastTab, + FastNoHeader, + FastCommentedHeader, + FastRdb) +from .cds import Cds +from .ecsv import Ecsv +from .latex import Latex, AASTex, latexdicts +from .html import HTML +from .ipac import Ipac +from .daophot import Daophot +from .sextractor import SExtractor +from .fixedwidth import (FixedWidth, FixedWidthNoHeader, + FixedWidthTwoLine, FixedWidthSplitter, + FixedWidthHeader, FixedWidthData) +from .rst import RST +from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace) + +from . import connect diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6794a4ef95c94441903e467b7c5f2cc5578977a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..1b753f4bcb86df4910a706ecfcc147323507add5 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.py @@ -0,0 +1,393 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""An extensible ASCII table reader and writer. + +basic.py: + Basic table read / write functionality for simple character + delimited files with various options for column header definition. + +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import re + +from . import core +from ...extern.six.moves import zip + + +class BasicHeader(core.BaseHeader): + """ + Basic table Header Reader + + Set a few defaults for common ascii table formats + (start at line 0, comments begin with ``#`` and possibly white space) + """ + start_line = 0 + comment = r'\s*#' + write_comment = '# ' + + +class BasicData(core.BaseData): + """ + Basic table Data Reader + + Set a few defaults for common ascii table formats + (start at line 1, comments begin with ``#`` and possibly white space) + """ + start_line = 1 + comment = r'\s*#' + write_comment = '# ' + + +class Basic(core.BaseReader): + r""" + Read a character-delimited table with a single header line at the top + followed by data lines to the end of the table. Lines beginning with # as + the first non-whitespace character are comments. This reader is highly + configurable. 
+ :: + + rdr = ascii.get_reader(Reader=ascii.Basic) + rdr.header.splitter.delimiter = ' ' + rdr.data.splitter.delimiter = ' ' + rdr.header.start_line = 0 + rdr.data.start_line = 1 + rdr.data.end_line = None + rdr.header.comment = r'\s*#' + rdr.data.comment = r'\s*#' + + Example table:: + + # Column definition is the first uncommented line + # Default delimiter is the space character. + apples oranges pears + + # Data starts after the header column definition, blank lines ignored + 1 2 3 + 4 5 6 + """ + _format_name = 'basic' + _description = 'Basic table with custom delimiters' + + header_class = BasicHeader + data_class = BasicData + + +class NoHeaderHeader(BasicHeader): + """ + Reader for table header without a header + + Set the start of header line number to `None`, which tells the basic + reader there is no header line. + """ + start_line = None + + +class NoHeaderData(BasicData): + """ + Reader for table data without a header + + Data starts at first uncommented line since there is no header line. + """ + start_line = 0 + + +class NoHeader(Basic): + """ + Read a table with no header line. Columns are autonamed using + header.auto_format which defaults to "col%d". Otherwise this reader is + the same as the :class:`Basic` class from which it is derived. Example:: + + # Table data + 1 2 "hello there" + 3 4 world + """ + _format_name = 'no_header' + _description = 'Basic table with no headers' + header_class = NoHeaderHeader + data_class = NoHeaderData + + +class CommentedHeaderHeader(BasicHeader): + """ + Header class for which the column definition line starts with the + comment character. See the :class:`CommentedHeader` class for an example. + """ + + def process_lines(self, lines): + """ + Return only lines that start with the comment regexp. For these + lines strip out the matching characters. + """ + re_comment = re.compile(self.comment) + for line in lines: + match = re_comment.match(line) + if match: + yield line[match.end():] + + def write(self, lines): + lines.append(self.write_comment + self.splitter.join(self.colnames)) + + +class CommentedHeader(Basic): + """ + Read a file where the column names are given in a line that begins with + the header comment character. ``header_start`` can be used to specify the + line index of column names, and it can be a negative index (for example -1 + for the last commented line). The default delimiter is the <space> + character.:: + + # col1 col2 col3 + # Comment line + 1 2 3 + 4 5 6 + """ + _format_name = 'commented_header' + _description = 'Column names in a commented line' + + header_class = CommentedHeaderHeader + data_class = NoHeaderData + + def read(self, table): + """ + Read input data (file-like object, filename, list of strings, or + single string) into a Table and return the result. + """ + out = super(CommentedHeader, self).read(table) + + # Strip off the comment line set as the header line for + # commented_header format (first by default). + if 'comments' in out.meta: + idx = self.header.start_line + if idx < 0: + idx = len(out.meta['comments']) + idx + out.meta['comments'] = out.meta['comments'][:idx] + out.meta['comments'][idx+1:] + if not out.meta['comments']: + del out.meta['comments'] + + return out + + def write_header(self, lines, meta): + """ + Write comment lines after, rather than before, the header.
+ """ + self.header.write(lines) + self.header.write_comments(lines, meta) + + +class TabHeaderSplitter(core.DefaultSplitter): + """Split lines on tab and do not remove whitespace""" + delimiter = '\t' + process_line = None + + +class TabDataSplitter(TabHeaderSplitter): + """ + Don't strip data value whitespace since that is significant in TSV tables + """ + process_val = None + skipinitialspace = False + + +class TabHeader(BasicHeader): + """ + Reader for header of tables with tab separated header + """ + splitter_class = TabHeaderSplitter + + +class TabData(BasicData): + """ + Reader for data of tables with tab separated data + """ + splitter_class = TabDataSplitter + + +class Tab(Basic): + """ + Read a tab-separated file. Unlike the :class:`Basic` reader, whitespace is + not stripped from the beginning and end of either lines or individual column + values. + + Example:: + + col1 col2 col3 + # Comment line + 1 2 5 + """ + _format_name = 'tab' + _description = 'Basic table with tab-separated values' + header_class = TabHeader + data_class = TabData + + +class CsvSplitter(core.DefaultSplitter): + """ + Split on comma for CSV (comma-separated-value) tables + """ + delimiter = ',' + + +class CsvHeader(BasicHeader): + """ + Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter` + """ + splitter_class = CsvSplitter + comment = None + write_comment = None + + +class CsvData(BasicData): + """ + Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter` + """ + splitter_class = CsvSplitter + fill_values = [(core.masked, '')] + comment = None + write_comment = None + + +class Csv(Basic): + """ + Read a CSV (comma-separated-values) file. + + Example:: + + num,ra,dec,radius,mag + 1,32.23222,10.1211,0.8,18.1 + 2,38.12321,-88.1321,2.2,17.0 + + Plain csv (comma separated value) files typically contain as many entries + as there are columns on each line. In contrast, common spreadsheet editors + stop writing if all remaining cells on a line are empty, which can lead to + lines where the rightmost entries are missing. This Reader can deal with + such files. + Masked values (indicated by an empty '' field value when reading) are + written out in the same way with an empty ('') field. This is different + from the typical default for `astropy.io.ascii` in which missing values are + indicated by ``--``. + + Example:: + + num,ra,dec,radius,mag + 1,32.23222,10.1211 + 2,38.12321,-88.1321,2.2,17.0 + """ + _format_name = 'csv' + _io_registry_can_write = True + _description = 'Comma-separated-values' + + header_class = CsvHeader + data_class = CsvData + + def inconsistent_handler(self, str_vals, ncols): + """ + Adjust row if it is too short. + + If a data row is shorter than the header, add empty values to make it the + right length. + Note that this will *not* be called if the row already matches the header. + + Parameters + ---------- + str_vals : list + A list of value strings from the current row of the table. + ncols : int + The expected number of entries from the table header. + + Returns + ------- + str_vals : list + List of strings to be parsed into data entries in the output table. + """ + if len(str_vals) < ncols: + str_vals.extend((ncols - len(str_vals)) * ['']) + + return str_vals + + +class RdbHeader(TabHeader): + """ + Header for RDB tables + """ + col_type_map = {'n': core.NumType, + 's': core.StrType} + + def get_type_map_key(self, col): + return col.raw_type[-1] + + def get_cols(self, lines): + """ + Initialize the header Column objects from the table ``lines``. 
+ + This is a specialized get_cols for the RDB type: + Line 0: RDB col names + Line 1: RDB col definitions + Line 2+: RDB data rows + + + Parameters + ---------- + lines : list + List of table lines + + Returns + ------- + None + + """ + header_lines = self.process_lines(lines) # this is a generator + header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))] + if len(header_vals_list) != 2: + raise ValueError('RDB header requires 2 lines') + self.names, raw_types = header_vals_list + + if len(self.names) != len(raw_types): + raise ValueError('RDB header mismatch between number of column names and column types') + + if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types): + raise ValueError('RDB types definitions do not all match [num](N|S): {}'.format(raw_types)) + + self._set_cols_from_names() + for col, raw_type in zip(self.cols, raw_types): + col.raw_type = raw_type + col.type = self.get_col_type(col) + + def write(self, lines): + lines.append(self.splitter.join(self.colnames)) + rdb_types = [] + for col in self.cols: + # Check if dtype.kind is string or unicode. See help(np.core.numerictypes) + rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N' + rdb_types.append(rdb_type) + + lines.append(self.splitter.join(rdb_types)) + + +class RdbData(TabData): + """ + Data reader for RDB data. Starts reading at line 2. + """ + start_line = 2 + + +class Rdb(Tab): + """ + Read a tab-separated file with an extra line after the column definition + line. The RDB format meets this definition. Example:: + + col1 col2 col3 + N S N + 1 2 5 + + In this reader the second line is just ignored. + """ + _format_name = 'rdb' + _io_registry_format_aliases = ['rdb'] + _io_registry_suffix = '.rdb' + _description = 'Tab-separated with a type definition header line' + + header_class = RdbHeader + data_class = RdbData diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1afae899d7632a4ac3d3c3487f438d739673bd5c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/basic.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f6356ad7f668c4cfe5f3ff6b96cebc943e61e6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.py @@ -0,0 +1,322 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""An extensible ASCII table reader and writer. + +cds.py: + Classes to read CDS / Vizier table format + +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import fnmatch +import itertools +import re +import os + +from . import core +from . import fixedwidth + +from ...utils.compat import suppress +from ...extern.six.moves import range + + +__doctest_skip__ = ['*'] + + +class CdsHeader(core.BaseHeader): + col_type_map = {'e': core.FloatType, + 'f': core.FloatType, + 'i': core.IntType, + 'a': core.StrType} + + 'The ReadMe file to construct header from.' 
+ readme = None + + def get_type_map_key(self, col): + match = re.match(r'\d*(\S)', col.raw_type.lower()) + if not match: + raise ValueError('Unrecognized CDS format "{}" for column "{}"'.format( + col.raw_type, col.name)) + return match.group(1) + + def get_cols(self, lines): + """ + Initialize the header Column objects from the table ``lines`` for a CDS + header. + + Parameters + ---------- + lines : list + List of table lines + + """ + + # Read header block for the table ``self.data.table_name`` from the read + # me file ``self.readme``. + if self.readme and self.data.table_name: + in_header = False + readme_inputter = core.BaseInputter() + f = readme_inputter.get_lines(self.readme) + # Header info is not in data lines but in a separate file. + lines = [] + comment_lines = 0 + for line in f: + line = line.strip() + if in_header: + lines.append(line) + if line.startswith(('------', '=======')): + comment_lines += 1 + if comment_lines == 3: + break + else: + match = re.match(r'Byte-by-byte Description of file: (?P<name>.+)$', + line, re.IGNORECASE) + if match: + # Split 'name' in case it contains multiple files + names = [s for s in re.split('[, ]+', match.group('name')) + if s] + # Iterate on names to find if one matches the tablename + # including wildcards. + for pattern in names: + if fnmatch.fnmatch(self.data.table_name, pattern): + in_header = True + lines.append(line) + break + + else: + raise core.InconsistentTableError("Can't find table {0} in {1}".format( + self.data.table_name, self.readme)) + + found_line = False + + for i_col_def, line in enumerate(lines): + if re.match(r'Byte-by-byte Description', line, re.IGNORECASE): + found_line = True + elif found_line: # First line after list of file descriptions + i_col_def -= 1 # Set i_col_def to last description line + break + + re_col_def = re.compile(r"""\s* + (?P<start> \d+ \s* -)? \s* + (?P<end> \d+) \s+ + (?P<format> [\w.]+) \s+ + (?P<units> \S+) \s+ + (?P<name> \S+) + (\s+ (?P<descr> \S.*))?""", + re.VERBOSE) + + cols = [] + for line in itertools.islice(lines, i_col_def+4, None): + if line.startswith(('------', '=======')): + break + match = re_col_def.match(line) + if match: + col = core.Column(name=match.group('name')) + col.start = int(re.sub(r'[-\s]', '', + match.group('start') or match.group('end'))) - 1 + col.end = int(match.group('end')) + col.unit = match.group('units') + if col.unit == '---': + col.unit = None # "---" is the marker for no unit in CDS table + col.description = (match.group('descr') or '').strip() + col.raw_type = match.group('format') + col.type = self.get_col_type(col) + + match = re.match( + r'\? (?P<equal> =)?
 (?P<nullval> \S*) (\s+ (?P<descriptiontext> \S.*))?', col.description, re.VERBOSE) + if match: + col.description = (match.group('descriptiontext') or '').strip() + if issubclass(col.type, core.FloatType): + fillval = 'nan' + else: + fillval = '0' + + if match.group('nullval') == '-': + col.null = '---' + # CDS tables can use -, --, ---, or ---- to mark missing values + # see https://github.com/astropy/astropy/issues/1335 + for i in [1, 2, 3, 4]: + self.data.fill_values.append(('-'*i, fillval, col.name)) + else: + col.null = match.group('nullval') + self.data.fill_values.append((col.null, fillval, col.name)) + + cols.append(col) + else: # could be a continuation of the previous col's description + if cols: + cols[-1].description += line.strip() + else: + raise ValueError('Line "{}" not parsable as CDS header'.format(line)) + + self.names = [x.name for x in cols] + + self.cols = cols + + +class CdsData(core.BaseData): + """CDS table data reader + """ + splitter_class = fixedwidth.FixedWidthSplitter + + def process_lines(self, lines): + """Skip over CDS header by finding the last section delimiter""" + # If the header has a ReadMe and data has a filename + # then no need to skip, as the data lines do not have header + # info. The ``read`` method adds the table_name to the ``data`` + # attribute. + if self.header.readme and self.table_name: + return lines + i_sections = [i for i, x in enumerate(lines) + if x.startswith(('------', '======='))] + if not i_sections: + raise core.InconsistentTableError('No CDS section delimiter found') + return lines[i_sections[-1]+1:] + + +class Cds(core.BaseReader): + """Read a CDS format table. See http://vizier.u-strasbg.fr/doc/catstd.htx. + Example:: + + Table: Table name here + = ============================================================================== + Catalog reference paper + Bibliography info here + ================================================================================ + ADC_Keywords: Keyword ; Another keyword ; etc + + Description: + Catalog description here. + ================================================================================ + Byte-by-byte Description of file: datafile3.txt + -------------------------------------------------------------------------------- + Bytes Format Units Label Explanations + -------------------------------------------------------------------------------- + 1- 3 I3 --- Index Running identification number + 5- 6 I2 h RAh Hour of Right Ascension (J2000) + 8- 9 I2 min RAm Minute of Right Ascension (J2000) + 11- 15 F5.2 s RAs Second of Right Ascension (J2000) + -------------------------------------------------------------------------------- + Note (1): A CDS file can contain sections with various metadata. + Notes can be multiple lines. + Note (2): Another note. + -------------------------------------------------------------------------------- + 1 03 28 39.09 + 2 04 18 24.11 + + **About parsing the CDS format** + + The CDS format consists of a table description and the table data. These + can be in separate files as a ``ReadMe`` file plus data file(s), or + combined in a single file. Different subsections within the description + are separated by lines of dashes or equal signs ("------" or "======"). + The table which specifies the column information must be preceded by a line + starting with "Byte-by-byte Description of file:". + + In the case where the table description is combined with the data values, + the data must be in the last section and must be preceded by a section + delimiter line (dashes or equal signs only).
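A minimal usage sketch for the combined-file case just described, assuming a hypothetical data file ``combined_cds.dat`` laid out like the example above (the ``data_start='guess'`` option is the one discussed in the notes that follow)::

    from astropy.io import ascii

    # Description and data in one file; the data rows sit in the last
    # dash/equals-delimited section.
    table = ascii.read('combined_cds.dat', format='cds')

    # If extra material (e.g. Notes) sits between the byte-by-byte
    # description and the data rows, let the reader guess the data start.
    table = ascii.read('combined_cds.dat', format='cds', data_start='guess')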
+ + **Basic usage** + + Use the ``ascii.read()`` function as normal, with an optional ``readme`` + parameter indicating the CDS ReadMe file. If not supplied it is assumed that + the header information is at the top of the given table. Examples:: + + >>> from astropy.io import ascii + >>> table = ascii.read("t/cds.dat") + >>> table = ascii.read("t/vizier/table1.dat", readme="t/vizier/ReadMe") + >>> table = ascii.read("t/cds/multi/lhs2065.dat", readme="t/cds/multi/ReadMe") + >>> table = ascii.read("t/cds/glob/lmxbrefs.dat", readme="t/cds/glob/ReadMe") + + The table name and the CDS ReadMe file can be entered as URLs. This can be used + to directly load tables from the Internet. For example, Vizier tables from the + CDS:: + + >>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat", + ... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe") + + If the header (ReadMe) and data are stored in a single file and there + is content between the header and the data (for instance Notes), then the + parsing process may fail. In this case you can instruct the reader to + guess the actual start of the data by supplying ``data_start='guess'`` in the + call to the ``ascii.read()`` function. You should verify that the output + data table matches expectation based on the input CDS file. + + **Using a reader object** + + When ``Cds`` reader object is created with a ``readme`` parameter + passed to it at initialization, then when the ``read`` method is + executed with a table filename, the header information for the + specified table is taken from the ``readme`` file. An + ``InconsistentTableError`` is raised if the ``readme`` file does not + have header information for the given table. + + >>> readme = "t/vizier/ReadMe" + >>> r = ascii.get_reader(ascii.Cds, readme=readme) + >>> table = r.read("t/vizier/table1.dat") + >>> # table5.dat has the same ReadMe file + >>> table = r.read("t/vizier/table5.dat") + + If no ``readme`` parameter is specified, then the header + information is assumed to be at the top of the given table. + + >>> r = ascii.get_reader(ascii.Cds) + >>> table = r.read("t/cds.dat") + >>> #The following gives InconsistentTableError, since no + >>> #readme file was given and table1.dat does not have a header. + >>> table = r.read("t/vizier/table1.dat") + Traceback (most recent call last): + ... + InconsistentTableError: No CDS section delimiter found + + Caveats: + + * The Units and Explanations are available in the column ``unit`` and + ``description`` attributes, respectively. + * The other metadata defined by this format is not available in the output table. + """ + _format_name = 'cds' + _io_registry_format_aliases = ['cds'] + _io_registry_can_write = False + _description = 'CDS format table' + + data_class = CdsData + header_class = CdsHeader + + def __init__(self, readme=None): + super(Cds, self).__init__() + self.header.readme = readme + + def write(self, table=None): + """Not available for the Cds class (raises NotImplementedError)""" + raise NotImplementedError + + def read(self, table): + # If the read kwarg `data_start` is 'guess' then the table may have extraneous + # lines between the end of the header and the beginning of data. + if self.data.start_line == 'guess': + # Replicate the first part of BaseReader.read up to the point where + # the table lines are initially read in. 
+ with suppress(TypeError): + # For strings only + if os.linesep not in table + '': + self.data.table_name = os.path.basename(table) + + self.data.header = self.header + self.header.data = self.data + + # Get a list of the lines (rows) in the table + lines = self.inputter.get_lines(table) + + # Now try increasing data.start_line by one until the table reads successfully. + # For efficiency use the in-memory list of lines instead of `table`, which + # could be a file. + for data_start in range(len(lines)): + self.data.start_line = data_start + with suppress(Exception): + table = super(Cds, self).read(lines) + return table + else: + return super(Cds, self).read(table) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4485a5803b455dcd768a1c2491e402ce2b670fbf Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cds.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.py new file mode 100644 index 0000000000000000000000000000000000000000..770ea855bb095515e459a1ab79467525d0eb25de --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.py @@ -0,0 +1,95 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# This file connects the readers/writers to the astropy.table.Table class + +from __future__ import absolute_import, division, print_function + +import re +import functools + +from .. import registry as io_registry +from ...table import Table +from ...extern.six.moves import zip + +__all__ = [] + + +# Generic +# ======= + + +def read_asciitable(filename, **kwargs): + from .ui import read + return read(filename, **kwargs) + + +io_registry.register_reader('ascii', Table, read_asciitable) + + +def write_asciitable(table, filename, **kwargs): + from .ui import write + return write(table, filename, **kwargs) + + +io_registry.register_writer('ascii', Table, write_asciitable) + + +def io_read(format, filename, **kwargs): + from .ui import read + format = re.sub(r'^ascii\.', '', format) + return read(filename, format=format, **kwargs) + + +def io_write(format, table, filename, **kwargs): + from .ui import write + format = re.sub(r'^ascii\.', '', format) + return write(table, filename, format=format, **kwargs) + + +def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs): + return filepath is not None and filepath.endswith(suffix) + + +def _get_connectors_table(): + from .core import FORMAT_CLASSES + + rows = [] + rows.append(('ascii', '', 'Yes', 'ASCII table in any supported format (uses guessing)')) + for format in sorted(FORMAT_CLASSES): + cls = FORMAT_CLASSES[format] + + io_format = 'ascii.' 
+ cls._format_name + description = getattr(cls, '_description', '') + class_link = ':class:`~{0}.{1}`'.format(cls.__module__, cls.__name__) + suffix = getattr(cls, '_io_registry_suffix', '') + can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else '' + + rows.append((io_format, suffix, can_write, + '{0}: {1}'.format(class_link, description))) + out = Table(list(zip(*rows)), names=('Format', 'Suffix', 'Write', 'Description')) + for colname in ('Format', 'Description'): + width = max(len(x) for x in out[colname]) + out[colname].format = '%-{0}s'.format(width) + + return out + + +# Specific +# ======== + +def read_csv(filename, **kwargs): + from .ui import read + kwargs['format'] = 'csv' + return read(filename, **kwargs) + + +def write_csv(table, filename, **kwargs): + from .ui import write + kwargs['format'] = 'csv' + return write(table, filename, **kwargs) + + +csv_identify = functools.partial(io_identify, '.csv') + +io_registry.register_reader('csv', Table, read_csv) +io_registry.register_writer('csv', Table, write_csv) +io_registry.register_identifier('csv', Table, csv_identify) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39d45b350e13dcb60c656052bc7eec714cf80300 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/connect.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.py new file mode 100644 index 0000000000000000000000000000000000000000..67b84e1de2a69efa264009483bc5463d655972b8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.py @@ -0,0 +1,1538 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" An extensible ASCII table reader and writer. + +core.py: + Core base classes and functions for reading and writing tables. + +:Copyright: Smithsonian Astrophysical Observatory (2010) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import copy +import csv +import functools +import itertools +import operator +import os +import re +import warnings + +from collections import OrderedDict + +import numpy + +from ...extern import six +from ...extern.six.moves import zip, range +from ...extern.six.moves import cStringIO as StringIO +from ...utils.exceptions import AstropyWarning + +from ...table import Table +from ...utils.compat import suppress +from ...utils.data import get_readable_fileobj +from . import connect + +# Global dictionary mapping format arg to the corresponding Reader class +FORMAT_CLASSES = {} + +# Similar dictionary for fast readers +FAST_CLASSES = {} + + +class CsvWriter(object): + """ + Internal class to replace the csv writer ``writerow`` and ``writerows`` + functions so that in the case of ``delimiter=' '`` and + ``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty + fields (when value == ''). + + This changes the API slightly in that the writerow() and writerows() + methods return the output written string instead of the length of + that string. 
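For contrast, a short sketch (Python 3 syntax, standard library only) of what the stock ``csv.writer`` produces for the same input: with a space delimiter and QUOTE_MINIMAL an empty field leaves nothing between two delimiters, which is the behaviour the sentinel replacement described above works around::

    import csv
    import io

    buf = io.StringIO()
    csv.writer(buf, delimiter=' ').writerow(['hello', '', 'world'])
    print(buf.getvalue())   # "hello  world" -- the empty field is invisible

    # CsvWriter.writerow (see the Examples below) returns 'hello "" world'.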
+ + Examples + -------- + + >>> from astropy.io.ascii.core import CsvWriter + >>> writer = CsvWriter(delimiter=' ') + >>> print(writer.writerow(['hello', '', 'world'])) + hello "" world + """ + # Random 16-character string that gets injected instead of any + # empty fields and is then replaced post-write with doubled-quotechar. + # Created with: + # ''.join(random.choice(string.printable[:90]) for _ in range(16)) + replace_sentinel = '2b=48Av%0-V3p>bX' + + def __init__(self, csvfile=None, **kwargs): + self.csvfile = csvfile + + # Temporary StringIO for catching the real csv.writer() object output + self.temp_out = StringIO() + self.writer = csv.writer(self.temp_out, **kwargs) + + dialect = self.writer.dialect + self.quotechar2 = dialect.quotechar * 2 + self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ') + + def writerow(self, values): + """ + Similar to csv.writer.writerow but with the custom quoting behavior. + Returns the written string instead of the length of that string. + """ + has_empty = False + + # If QUOTE_MINIMAL and space-delimited then replace empty fields with + # the sentinel value. + if self.quote_empty: + for i, value in enumerate(values): + if value == '': + has_empty = True + values[i] = self.replace_sentinel + + return self._writerow(self.writer.writerow, values, has_empty) + + def writerows(self, values_list): + """ + Similar to csv.writer.writerows but with the custom quoting behavior. + Returns the written string instead of the length of that string. + """ + has_empty = False + + # If QUOTE_MINIMAL and space-delimited then replace empty fields with + # the sentinel value. + if self.quote_empty: + for values in values_list: + for i, value in enumerate(values): + if value == '': + has_empty = True + values[i] = self.replace_sentinel + + return self._writerow(self.writer.writerows, values_list, has_empty) + + def _writerow(self, writerow_func, values, has_empty): + """ + Call ``writerow_func`` (either writerow or writerows) with ``values``. + If it has empty fields that have been replaced then change those + sentinel strings back to quoted empty strings, e.g. ``""``. + """ + # Clear the temporary StringIO buffer that self.writer writes into and + # then call the real csv.writer().writerow or writerows with values. + self.temp_out.seek(0) + self.temp_out.truncate() + writerow_func(values) + + row_string = self.temp_out.getvalue() + + if self.quote_empty and has_empty: + row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string) + + # self.csvfile is defined then write the output. In practice the pure + # Python writer calls with csvfile=None, while the fast writer calls with + # a file-like object. + if self.csvfile: + self.csvfile.write(row_string) + + return row_string + + +class MaskedConstant(numpy.ma.core.MaskedConstant): + """A trivial extension of numpy.ma.masked + + We want to be able to put the generic term ``masked`` into a dictionary. + The constant ``numpy.ma.masked`` is not hashable (see + https://github.com/numpy/numpy/issues/4660), so we need to extend it + here with a hash value. + """ + + def __hash__(self): + '''All instances of this class shall have the same hash.''' + # Any large number will do. + return 1234567890 + + +masked = MaskedConstant() + + +class InconsistentTableError(ValueError): + """ + Indicates that an input table is inconsistent in some way. + + The default behavior of ``BaseReader`` is to throw an instance of + this class if a data row doesn't match the header. 
+ """ + + +class OptionalTableImportError(ImportError): + """ + Indicates that a dependency for table reading is not present. + + An instance of this class is raised whenever an optional reader + with certain required dependencies cannot operate because of + an ImportError. + """ + + +class ParameterError(NotImplementedError): + """ + Indicates that a reader cannot handle a passed parameter. + + The C-based fast readers in ``io.ascii`` raise an instance of + this error class upon encountering a parameter that the + C engine cannot handle. + """ + + +class FastOptionsError(NotImplementedError): + """ + Indicates that one of the specified options for fast + reading is invalid. + """ + + +class NoType(object): + """ + Superclass for ``StrType`` and ``NumType`` classes. + + This class is the default type of ``Column`` and provides a base + class for other data types. + """ + + +class StrType(NoType): + """ + Indicates that a column consists of text data. + """ + + +class NumType(NoType): + """ + Indicates that a column consists of numerical data. + """ + + +class FloatType(NumType): + """ + Describes floating-point data. + """ + + +class BoolType(NoType): + """ + Describes boolean data. + """ + + +class IntType(NumType): + """ + Describes integer data. + """ + + +class AllType(StrType, FloatType, IntType): + """ + Subclass of all other data types. + + This type is returned by ``convert_numpy`` if the given numpy + type does not match ``StrType``, ``FloatType``, or ``IntType``. + """ + + +class Column(object): + """Table column. + + The key attributes of a Column object are: + + * **name** : column name + * **type** : column type (NoType, StrType, NumType, FloatType, IntType) + * **dtype** : numpy dtype (optional, overrides **type** if set) + * **str_vals** : list of column values as strings + * **data** : list of converted column values + """ + + def __init__(self, name): + self.name = name + self.type = NoType # Generic type (Int, Float, Str etc) + self.dtype = None # Numpy dtype if available + self.str_vals = [] + self.fill_values = {} + + +class BaseInputter(object): + """ + Get the lines from the table input and return a list of lines. + + """ + + encoding = None + """Encoding used to read the file""" + + def get_lines(self, table): + """ + Get the lines from the ``table`` input. The input table can be one of: + + * File name + * String (newline separated) with all header and data lines (must have at least 2 lines) + * File-like object with read() method + * List of strings + + Parameters + ---------- + table : str, file_like, list + Can be either a file name, string (newline separated) with all header and data + lines (must have at least 2 lines), a file-like object with a ``read()`` method, + or a list of strings. + + Returns + ------- + lines : list + List of lines + """ + try: + if (hasattr(table, 'read') or + ('\n' not in table + '' and '\r' not in table + '')): + with get_readable_fileobj(table, + encoding=self.encoding) as fileobj: + table = fileobj.read() + lines = table.splitlines() + except TypeError: + try: + # See if table supports indexing, slicing, and iteration + table[0] + table[0:1] + iter(table) + lines = table + except TypeError: + raise TypeError( + 'Input "table" must be a string (filename or data) or an iterable') + + return self.process_lines(lines) + + def process_lines(self, lines): + """Process lines for subsequent use. In the default case do nothing. + This routine is not generally intended for removing comment lines or + stripping whitespace. 
These are done (if needed) in the header and + data line processing. + + Override this method if something more has to be done to convert raw + input lines to the table rows. For example the + ContinuationLinesInputter derived class accounts for continuation + characters if a row is split into lines.""" + return lines + + +class BaseSplitter(object): + """ + Base splitter that uses python's split method to do the work. + + This does not handle quoted values. A key feature is the formulation of + __call__ as a generator that returns a list of the split line values at + each iteration. + + There are two methods that are intended to be overridden, first + ``process_line()`` to do pre-processing on each input line before splitting + and ``process_val()`` to do post-processing on each split string value. By + default these apply the string ``strip()`` function. These can be set to + another function via the instance attribute or be disabled entirely, for + example:: + + reader.header.splitter.process_val = lambda x: x.lstrip() + reader.data.splitter.process_val = None + + """ + + delimiter = None + """ one-character string used to separate fields """ + + def process_line(self, line): + """Remove whitespace at the beginning or end of line. This is especially useful for + whitespace-delimited files to prevent spurious columns at the beginning or end.""" + return line.strip() + + def process_val(self, val): + """Remove whitespace at the beginning or end of value.""" + return val.strip() + + def __call__(self, lines): + if self.process_line: + lines = (self.process_line(x) for x in lines) + for line in lines: + vals = line.split(self.delimiter) + if self.process_val: + yield [self.process_val(x) for x in vals] + else: + yield vals + + def join(self, vals): + if self.delimiter is None: + delimiter = ' ' + else: + delimiter = self.delimiter + return delimiter.join(str(x) for x in vals) + + +class DefaultSplitter(BaseSplitter): + """Default class to split strings into columns using python csv. The class + attributes are taken from the csv Dialect class. + + Typical usage:: + + # lines = .. + splitter = ascii.DefaultSplitter() + for col_vals in splitter(lines): + for col_val in col_vals: + ... + + """ + delimiter = ' ' + """ one-character string used to separate fields. """ + quotechar = '"' + """ one-character string used to quote fields containing special characters """ + doublequote = True + """ control how instances of *quotechar* in a field are quoted """ + escapechar = None + """ one-character string used to remove special meaning from the following character """ + quoting = csv.QUOTE_MINIMAL + """ control when quotes are recognised by the reader """ + skipinitialspace = True + """ ignore whitespace immediately following the delimiter """ + csv_writer = None + csv_writer_out = StringIO() + + def process_line(self, line): + """Remove whitespace at the beginning or end of line. This is especially useful for + whitespace-delimited files to prevent spurious columns at the beginning or end. + If splitting on whitespace then replace unquoted tabs with space first""" + if self.delimiter == r'\s': + line = _replace_tab_with_space(line, self.escapechar, self.quotechar) + return line.strip() + + def __call__(self, lines): + """Return an iterator over the table ``lines``, where each iterator output + is a list of the split line values.
+ + Parameters + ---------- + lines : list + List of table lines + + Returns + ------- + lines : iterator + + """ + if self.process_line: + lines = [self.process_line(x) for x in lines] + + # In Python 2.x the inputs to csv cannot be unicode. In Python 3 these + # lines do nothing. + escapechar = None if self.escapechar is None else str(self.escapechar) + quotechar = None if self.quotechar is None else str(self.quotechar) + delimiter = None if self.delimiter is None else str(self.delimiter) + + if delimiter == r'\s': + delimiter = ' ' + + csv_reader = csv.reader(lines, + delimiter=delimiter, + doublequote=self.doublequote, + escapechar=escapechar, + quotechar=quotechar, + quoting=self.quoting, + skipinitialspace=self.skipinitialspace + ) + for vals in csv_reader: + if self.process_val: + yield [self.process_val(x) for x in vals] + else: + yield vals + + def join(self, vals): + + # In Python 2.x the inputs to csv cannot be unicode + escapechar = None if self.escapechar is None else str(self.escapechar) + quotechar = None if self.quotechar is None else str(self.quotechar) + delimiter = ' ' if self.delimiter is None else str(self.delimiter) + + if self.csv_writer is None: + self.csv_writer = CsvWriter(delimiter=delimiter, + doublequote=self.doublequote, + escapechar=escapechar, + quotechar=quotechar, + quoting=self.quoting, + lineterminator='') + if self.process_val: + vals = [self.process_val(x) for x in vals] + out = self.csv_writer.writerow(vals) + + return out + + +def _replace_tab_with_space(line, escapechar, quotechar): + """Replace tabs with spaces in given string, preserving quoted substrings + + Parameters + ---------- + line : str + String containing tabs to be replaced with spaces. + escapechar : str + Character in ``line`` used to escape special characters. + quotechar : str + Character in ``line`` indicating the start/end of a substring. + + Returns + ------- + line : str + A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings. + """ + newline = [] + in_quote = False + lastchar = 'NONE' + for char in line: + if char == quotechar and lastchar != escapechar: + in_quote = not in_quote + if char == '\t' and not in_quote: + char = ' ' + lastchar = char + newline.append(char) + return ''.join(newline) + + +def _get_line_index(line_or_func, lines): + """Return the appropriate line index, depending on ``line_or_func`` which + can be either a function, a positive or negative int, or None. + """ + + if hasattr(line_or_func, '__call__'): + return line_or_func(lines) + elif line_or_func: + if line_or_func >= 0: + return line_or_func + else: + n_lines = sum(1 for line in lines) + return n_lines + line_or_func + else: + return line_or_func + + +class BaseHeader(object): + """ + Base table header reader + """ + auto_format = 'col{}' + """ format string for auto-generating column names """ + start_line = None + """ None, int, or a function of ``lines`` that returns None or int """ + comment = None + """ regular expression for comment lines """ + splitter_class = DefaultSplitter + """ Splitter class for splitting data lines into columns """ + names = None + """ list of names corresponding to each data column """ + write_comment = False + write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE'] + + def __init__(self): + self.splitter = self.splitter_class() + + def _set_cols_from_names(self): + self.cols = [Column(name=x) for x in self.names] + + def update_meta(self, lines, meta): + """ + Extract any table-level metadata, e.g. 
keywords, comments, column metadata, from + the table ``lines`` and update the OrderedDict ``meta`` in place. This base + method extracts comment lines and stores them in ``meta`` for output. + """ + if self.comment: + re_comment = re.compile(self.comment) + comment_lines = [x for x in lines if re_comment.match(x)] + else: + comment_lines = [] + comment_lines = [re.sub('^' + self.comment, '', x).strip() + for x in comment_lines] + if comment_lines: + meta.setdefault('table', {})['comments'] = comment_lines + + def get_cols(self, lines): + """Initialize the header Column objects from the table ``lines``. + + Based on the previously set Header attributes find or create the column names. + Sets ``self.cols`` with the list of Columns. + + Parameters + ---------- + lines : list + List of table lines + + """ + + start_line = _get_line_index(self.start_line, self.process_lines(lines)) + if start_line is None: + # No header line so auto-generate names from n_data_cols + # Get the data values from the first line of table data to determine n_data_cols + try: + first_data_vals = next(self.data.get_str_vals()) + except StopIteration: + raise InconsistentTableError('No data lines found so cannot autogenerate ' + 'column names') + n_data_cols = len(first_data_vals) + self.names = [self.auto_format.format(i) + for i in range(1, n_data_cols + 1)] + + else: + for i, line in enumerate(self.process_lines(lines)): + if i == start_line: + break + else: # No header line matching + raise ValueError('No header line found in table') + + self.names = next(self.splitter([line])) + + self._set_cols_from_names() + + def process_lines(self, lines): + """Generator to yield non-blank and non-comment lines""" + if self.comment: + re_comment = re.compile(self.comment) + # Yield non-comment lines + for line in lines: + if line.strip() and (not self.comment or not re_comment.match(line)): + yield line + + def write_comments(self, lines, meta): + if self.write_comment is not False: + for comment in meta.get('comments', []): + lines.append(self.write_comment + comment) + + def write(self, lines): + if self.start_line is not None: + for i, spacer_line in zip(range(self.start_line), + itertools.cycle(self.write_spacer_lines)): + lines.append(spacer_line) + lines.append(self.splitter.join([x.info.name for x in self.cols])) + + @property + def colnames(self): + """Return the column names of the table""" + return tuple(col.name if isinstance(col, Column) else col.info.name + for col in self.cols) + + def get_type_map_key(self, col): + return col.raw_type + + def get_col_type(self, col): + try: + type_map_key = self.get_type_map_key(col) + return self.col_type_map[type_map_key.lower()] + except KeyError: + raise ValueError('Unknown data type ""{}"" for column "{}"'.format( + col.raw_type, col.name)) + + def check_column_names(self, names, strict_names, guessing): + """ + Check column names. + + This must be done before applying the names transformation + so that guessing will fail appropriately if ``names`` is supplied. + For instance if the basic reader is given a table with no column header + row. 
+ + Parameters + ---------- + names : list + User-supplied list of column names + strict_names : bool + Whether to impose extra requirements on names + guessing : bool + True if this method is being called while guessing the table format + """ + if strict_names: + # Impose strict requirements on column names (normally used in guessing) + bads = [" ", ",", "|", "\t", "'", '"'] + for name in self.colnames: + if (_is_number(name) or + len(name) == 0 or + name[0] in bads or + name[-1] in bads): + raise ValueError('Column name {0!r} does not meet strict name requirements' + .format(name)) + # When guessing require at least two columns + if guessing and len(self.colnames) <= 1: + raise ValueError('Strict name guessing requires at least two columns') + + if names is not None and len(names) != len(self.colnames): + raise ValueError('Length of names argument ({0}) does not match number' + ' of table columns ({1})'.format(len(names), len(self.colnames))) + + +class BaseData(object): + """ + Base table data reader. + """ + start_line = None + """ None, int, or a function of ``lines`` that returns None or int """ + end_line = None + """ None, int, or a function of ``lines`` that returns None or int """ + comment = None + """ Regular expression for comment lines """ + splitter_class = DefaultSplitter + """ Splitter class for splitting data lines into columns """ + write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE'] + fill_include_names = None + fill_exclude_names = None + fill_values = [(masked, '')] + formats = {} + + def __init__(self): + # Need to make sure fill_values list is instance attribute, not class attribute. + # On read, this will be overwritten by the default in the ui.read (thus, in + # the current implementation there can be no different default for different + # Readers). On write, ui.py does not specify a default, so this line here matters. + self.fill_values = copy.copy(self.fill_values) + self.formats = copy.copy(self.formats) + self.splitter = self.splitter_class() + + def process_lines(self, lines): + """ + Strip out comment lines and blank lines from list of ``lines`` + + Parameters + ---------- + lines : list + All lines in table + + Returns + ------- + lines : list + List of lines + + """ + nonblank_lines = (x for x in lines if x.strip()) + if self.comment: + re_comment = re.compile(self.comment) + return [x for x in nonblank_lines if not re_comment.match(x)] + else: + return [x for x in nonblank_lines] + + def get_data_lines(self, lines): + """Set the ``data_lines`` attribute to the lines slice comprising the + table data values.""" + data_lines = self.process_lines(lines) + start_line = _get_line_index(self.start_line, data_lines) + end_line = _get_line_index(self.end_line, data_lines) + + if start_line is not None or end_line is not None: + self.data_lines = data_lines[slice(start_line, end_line)] + else: # Don't copy entire data lines unless necessary + self.data_lines = data_lines + + def get_str_vals(self): + """Return a generator that returns a list of column values (as strings) + for each data line.""" + return self.splitter(self.data_lines) + + def masks(self, cols): + """Set fill value for each column and then apply that fill value + + In the first step it is evaluated with value from ``fill_values`` applies to + which column using ``fill_include_names`` and ``fill_exclude_names``. + In the second step all replacements are done for the appropriate columns. 
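The ``fill_values``, ``fill_include_names`` and ``fill_exclude_names`` attributes above drive masking on read. A short illustrative example via the public reader (a sketch, assuming astropy is importable)::

    from astropy.io import ascii

    lines = ['a b', '1 INDEF', '2 3']
    # Entries matching 'INDEF' are replaced by '0' and masked in the output
    t = ascii.read(lines, fill_values=[('INDEF', '0')])
    print(t['b'])                         # the 'INDEF' entry comes back masked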
+ """ + if self.fill_values: + self._set_fill_values(cols) + self._set_masks(cols) + + def _set_fill_values(self, cols): + """Set the fill values of the individual cols based on fill_values of BaseData + + fill values has the following form: + = (, , ...) + fill_values = or list of 's + + """ + if self.fill_values: + # when we write tables the columns may be astropy.table.Columns + # which don't carry a fill_values by default + for col in cols: + if not hasattr(col, 'fill_values'): + col.fill_values = {} + + # if input is only one , then make it a list + with suppress(TypeError): + self.fill_values[0] + '' + self.fill_values = [self.fill_values] + + # Step 1: Set the default list of columns which are affected by + # fill_values + colnames = set(self.header.colnames) + if self.fill_include_names is not None: + colnames.intersection_update(self.fill_include_names) + if self.fill_exclude_names is not None: + colnames.difference_update(self.fill_exclude_names) + + # Step 2a: Find out which columns are affected by this tuple + # iterate over reversed order, so last condition is set first and + # overwritten by earlier conditions + for replacement in reversed(self.fill_values): + if len(replacement) < 2: + raise ValueError("Format of fill_values must be " + "(, , , ...)") + elif len(replacement) == 2: + affect_cols = colnames + else: + affect_cols = replacement[2:] + + for i, key in ((i, x) for i, x in enumerate(self.header.colnames) + if x in affect_cols): + cols[i].fill_values[replacement[0]] = str(replacement[1]) + + def _set_masks(self, cols): + """Replace string values in col.str_vals and set masks""" + if self.fill_values: + for col in (col for col in cols if col.fill_values): + col.mask = numpy.zeros(len(col.str_vals), dtype=numpy.bool) + for i, str_val in ((i, x) for i, x in enumerate(col.str_vals) + if x in col.fill_values): + col.str_vals[i] = col.fill_values[str_val] + col.mask[i] = True + + def _replace_vals(self, cols): + """Replace string values in col.str_vals""" + if self.fill_values: + for col in (col for col in cols if col.fill_values): + for i, str_val in ((i, x) for i, x in enumerate(col.str_vals) + if x in col.fill_values): + col.str_vals[i] = col.fill_values[str_val] + if masked in col.fill_values and hasattr(col, 'mask'): + mask_val = col.fill_values[masked] + for i in col.mask.nonzero()[0]: + col.str_vals[i] = mask_val + + def str_vals(self): + '''convert all values in table to a list of lists of strings''' + self._set_fill_values(self.cols) + self._set_col_formats() + for col in self.cols: + col.str_vals = list(col.info.iter_str_vals()) + self._replace_vals(self.cols) + return [col.str_vals for col in self.cols] + + def write(self, lines): + if hasattr(self.start_line, '__call__'): + raise TypeError('Start_line attribute cannot be callable for write()') + else: + data_start_line = self.start_line or 0 + + while len(lines) < data_start_line: + lines.append(itertools.cycle(self.write_spacer_lines)) + + col_str_iters = self.str_vals() + for vals in zip(*col_str_iters): + lines.append(self.splitter.join(vals)) + + def _set_col_formats(self): + """ + """ + for col in self.cols: + if col.info.name in self.formats: + col.info.format = self.formats[col.name] + + +def convert_numpy(numpy_type): + """Return a tuple containing a function which converts a list into a numpy + array and the type produced by the converter function. + + Parameters + ---------- + numpy_type : numpy data-type + The numpy type required of an array returned by ``converter``. 
Must be a + valid `numpy type `_, + e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float, + numpy.float64, numpy.str. + + Returns + ------- + (converter, converter_type) : (function, generic data-type) + ``converter`` is a function which accepts a list and converts it to a + numpy array of type ``numpy_type``. + ``converter_type`` tracks the generic data type produced by the converter + function. + + Raises + ------ + ValueError + Raised by ``converter`` if the list elements could not be converted to + the required type. + """ + + # Infer converter type from an instance of numpy_type. + type_name = numpy.array([], dtype=numpy_type).dtype.name + if 'int' in type_name: + converter_type = IntType + elif 'float' in type_name: + converter_type = FloatType + elif 'bool' in type_name: + converter_type = BoolType + elif 'str' in type_name: + converter_type = StrType + else: + converter_type = AllType + + def bool_converter(vals): + """ + Convert values "False" and "True" to bools. Raise an exception + for any other string values. + """ + if len(vals) == 0: + return numpy.array([], dtype=bool) + + # Try a smaller subset first for a long array + if len(vals) > 10000: + svals = numpy.asarray(vals[:1000]) + if not numpy.all((svals == 'False') | (svals == 'True')): + raise ValueError('bool input strings must be only False or True') + vals = numpy.asarray(vals) + trues = vals == 'True' + falses = vals == 'False' + if not numpy.all(trues | falses): + raise ValueError('bool input strings must be only False or True') + return trues + + def generic_converter(vals): + return numpy.array(vals, numpy_type) + + converter = bool_converter if converter_type is BoolType else generic_converter + + return converter, converter_type + + +class BaseOutputter(object): + """Output table as a dict of column objects keyed on column name. The + table data are stored as plain python lists within the column objects. + """ + converters = {} + # Derived classes must define default_converters and __call__ + + @staticmethod + def _validate_and_copy(col, converters): + """Validate the format for the type converters and then copy those + which are valid converters for this column (i.e. converter type is + a subclass of col.type)""" + converters_out = [] + try: + for converter in converters: + converter_func, converter_type = converter + if not issubclass(converter_type, NoType): + raise ValueError() + if issubclass(converter_type, col.type): + converters_out.append((converter_func, converter_type)) + + except (ValueError, TypeError): + raise ValueError('Error: invalid format for converters, see ' + 'documentation\n{}'.format(converters)) + return converters_out + + def _convert_vals(self, cols): + for col in cols: + # If a specific dtype was specified for a column, then use that + # to set the defaults, otherwise use the generic defaults. + default_converters = ([convert_numpy(col.dtype)] if col.dtype + else self.default_converters) + + # If the user supplied a specific convert then that takes precedence over defaults + converters = self.converters.get(col.name, default_converters) + + col.converters = self._validate_and_copy(col, converters) + + # Catch the last error in order to provide additional information + # in case all attempts at column conversion fail. The initial + # value of of last_error will apply if no converters are defined + # and the first col.converters[0] access raises IndexError. 
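``convert_numpy`` is also usable with user-supplied ``converters``; a hedged sketch of forcing a column type on read (assuming ``convert_numpy`` is importable from ``astropy.io.ascii`` as documented)::

    from astropy.io import ascii
    from astropy.io.ascii import convert_numpy

    lines = ['id flag', '1 True', '2 False']
    # Force 'flag' through the bool converter defined above
    converters = {'flag': [convert_numpy(bool)]}
    t = ascii.read(lines, converters=converters)
    print(t['flag'].dtype)                # bool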
+ last_err = 'no converters defined' + + while not hasattr(col, 'data'): + try: + converter_func, converter_type = col.converters[0] + if not issubclass(converter_type, col.type): + raise TypeError('converter type does not match column type') + col.data = converter_func(col.str_vals) + col.type = converter_type + except (TypeError, ValueError) as err: + col.converters.pop(0) + last_err = err + except OverflowError as err: + # Overflow during conversion (most likely an int that doesn't fit in native C long). + # Put string at the top of the converters list for the next while iteration. + warnings.warn("OverflowError converting to {0} for column {1}, using string instead." + .format(converter_type.__name__, col.name), AstropyWarning) + col.converters.insert(0, convert_numpy(numpy.str)) + last_err = err + except IndexError: + raise ValueError('Column {} failed to convert: {}'.format(col.name, last_err)) + + +class TableOutputter(BaseOutputter): + """ + Output the table as an astropy.table.Table object. + """ + + default_converters = [convert_numpy(numpy.int), + convert_numpy(numpy.float), + convert_numpy(numpy.str)] + + def __call__(self, cols, meta): + # Sets col.data to numpy array and col.type to io.ascii Type class (e.g. + # FloatType) for each col. + self._convert_vals(cols) + + # If there are any values that were filled and tagged with a mask bit then this + # will be a masked table. Otherwise use a plain table. + masked = any(hasattr(col, 'mask') and numpy.any(col.mask) for col in cols) + + out = Table([x.data for x in cols], names=[x.name for x in cols], masked=masked, + meta=meta['table']) + for col, out_col in zip(cols, out.columns.values()): + if masked and hasattr(col, 'mask'): + out_col.data.mask = col.mask + for attr in ('format', 'unit', 'description'): + if hasattr(col, attr): + setattr(out_col, attr, getattr(col, attr)) + if hasattr(col, 'meta'): + out_col.meta.update(col.meta) + + return out + + +class MetaBaseReader(type): + def __init__(cls, name, bases, dct): + super(MetaBaseReader, cls).__init__(name, bases, dct) + + format = dct.get('_format_name') + if format is None: + return + + fast = dct.get('_fast') + if fast is not None: + FAST_CLASSES[format] = cls + + FORMAT_CLASSES[format] = cls + + io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', []) + + if dct.get('_io_registry_suffix'): + func = functools.partial(connect.io_identify, dct['_io_registry_suffix']) + connect.io_registry.register_identifier(io_formats[0], Table, func) + + for io_format in io_formats: + func = functools.partial(connect.io_read, io_format) + connect.io_registry.register_reader(io_format, Table, func) + + if dct.get('_io_registry_can_write', True): + func = functools.partial(connect.io_write, io_format) + connect.io_registry.register_writer(io_format, Table, func) + + +def _is_number(x): + with suppress(ValueError): + x = float(x) + return True + return False + + +def _apply_include_exclude_names(table, names, include_names, exclude_names): + """ + Apply names, include_names and exclude_names to a table. 
+ + Parameters + ---------- + table : `~astropy.table.Table` + Input table + names : list + List of names to override those in table (set to None to use existing names) + include_names : list + List of names to include in output + exclude_names : list + List of names to exclude from output (applied after ``include_names``) + + """ + + if names is not None: + # Rename table column names to those passed by user + # Temporarily rename with names that are not in `names` or `table.colnames`. + # This ensures that rename succeeds regardless of existing names. + xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames)) + for ii, colname in enumerate(table.colnames): + table.rename_column(colname, xxxs + str(ii)) + + for ii, name in enumerate(names): + table.rename_column(xxxs + str(ii), name) + + names = set(table.colnames) + if include_names is not None: + names.intersection_update(include_names) + if exclude_names is not None: + names.difference_update(exclude_names) + if names != set(table.colnames): + remove_names = set(table.colnames) - set(names) + table.remove_columns(remove_names) + + +@six.add_metaclass(MetaBaseReader) +class BaseReader(object): + """Class providing methods to read and write an ASCII table using the specified + header, data, inputter, and outputter instances. + + Typical usage is to instantiate a Reader() object and customize the + ``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each + of these is an object of the corresponding class. + + There is one method ``inconsistent_handler`` that can be used to customize the + behavior of ``read()`` in the event that a data row doesn't match the header. + The default behavior is to raise an InconsistentTableError. + + """ + + names = None + include_names = None + exclude_names = None + strict_names = False + guessing = False + encoding = None + + header_class = BaseHeader + data_class = BaseData + inputter_class = BaseInputter + outputter_class = TableOutputter + + def __init__(self): + self.header = self.header_class() + self.data = self.data_class() + self.inputter = self.inputter_class() + self.outputter = self.outputter_class() + # Data and Header instances benefit from a little cross-coupling. Header may need to + # know about number of data columns for auto-column name generation and Data may + # need to know about header (e.g. for fixed-width tables where widths are spec'd in header. + self.data.header = self.header + self.header.data = self.data + + # Metadata, consisting of table-level meta and column-level meta. The latter + # could include information about column type, description, formatting, etc, + # depending on the table meta format. + self.meta = OrderedDict(table=OrderedDict(), + cols=OrderedDict()) + + def read(self, table): + """Read the ``table`` and return the results in a format determined by + the ``outputter`` attribute. + + The ``table`` parameter is any string or object that can be processed + by the instance ``inputter``. For the base Inputter class ``table`` can be + one of: + + * File name + * File-like object + * String (newline separated) with all header and data lines (must have at least 2 lines) + * List of strings + + Parameters + ---------- + table : str, file_like, list + Input table. + + Returns + ------- + table : `~astropy.table.Table` + Output table + + """ + # If ``table`` is a file then store the name in the ``data`` + # attribute. The ``table`` is a "file" if it is a string + # without the new line specific to the OS. 
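The accepted ``table`` inputs listed above are interchangeable; a small sketch (assuming astropy is importable)::

    from astropy.io import ascii

    # A newline-separated string and a list of lines give the same table
    t1 = ascii.read('a b\n1 2\n3 4')
    t2 = ascii.read(['a b', '1 2', '3 4'])
    assert t1.colnames == t2.colnames == ['a', 'b']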
+ with suppress(TypeError): + # Strings only + if os.linesep not in table + '': + self.data.table_name = os.path.basename(table) + + # Get a list of the lines (rows) in the table + self.lines = self.inputter.get_lines(table) + + # Set self.data.data_lines to a slice of lines contain the data rows + self.data.get_data_lines(self.lines) + + # Extract table meta values (e.g. keywords, comments, etc). Updates self.meta. + self.header.update_meta(self.lines, self.meta) + + # Get the table column definitions + self.header.get_cols(self.lines) + + # Make sure columns are valid + self.header.check_column_names(self.names, self.strict_names, self.guessing) + + self.cols = cols = self.header.cols + self.data.splitter.cols = cols + n_cols = len(cols) + + for i, str_vals in enumerate(self.data.get_str_vals()): + if len(str_vals) != n_cols: + str_vals = self.inconsistent_handler(str_vals, n_cols) + + # if str_vals is None, we skip this row + if str_vals is None: + continue + + # otherwise, we raise an error only if it is still inconsistent + if len(str_vals) != n_cols: + errmsg = ('Number of header columns ({}) inconsistent with' + ' data columns ({}) at data line {}\n' + 'Header values: {}\n' + 'Data values: {}'.format( + n_cols, len(str_vals), i, + [x.name for x in cols], str_vals)) + + raise InconsistentTableError(errmsg) + + for j, col in enumerate(cols): + col.str_vals.append(str_vals[j]) + + self.data.masks(cols) + if hasattr(self.header, 'table_meta'): + self.meta['table'].update(self.header.table_meta) + table = self.outputter(cols, self.meta) + self.cols = self.header.cols + + _apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names) + + return table + + def inconsistent_handler(self, str_vals, ncols): + """ + Adjust or skip data entries if a row is inconsistent with the header. + + The default implementation does no adjustment, and hence will always trigger + an exception in read() any time the number of data entries does not match + the header. + + Note that this will *not* be called if the row already matches the header. + + Parameters + ---------- + str_vals : list + A list of value strings from the current row of the table. + ncols : int + The expected number of entries from the table header. + + Returns + ------- + str_vals : list + List of strings to be parsed into data entries in the output table. If + the length of this list does not match ``ncols``, an exception will be + raised in read(). Can also be None, in which case the row will be + skipped. + """ + # an empty list will always trigger an InconsistentTableError in read() + return str_vals + + @property + def comment_lines(self): + """Return lines in the table that match header.comment regexp""" + if not hasattr(self, 'lines'): + raise ValueError('Table must be read prior to accessing the header comment lines') + if self.header.comment: + re_comment = re.compile(self.header.comment) + comment_lines = [x for x in self.lines if re_comment.match(x)] + else: + comment_lines = [] + return comment_lines + + def update_table_data(self, table): + """ + Update table columns in place if needed. + + This is a hook to allow updating the table columns after name + filtering but before setting up to write the data. This is currently + only used by ECSV and is otherwise just a pass-through. 
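``inconsistent_handler`` is the hook described above for rows that do not match the header. A sketch using a hypothetical subclass ``SkipBadRows``, passed through the ``Reader`` keyword (assuming that keyword is accepted by ``ascii.read`` in this astropy version)::

    from astropy.io import ascii

    class SkipBadRows(ascii.Basic):
        # Return None to skip any row whose column count differs from the header
        def inconsistent_handler(self, str_vals, ncols):
            return str_vals if len(str_vals) == ncols else None

    lines = ['a b', '1 2', '3 4 5', '6 7']
    t = ascii.read(lines, Reader=SkipBadRows, guess=False)
    print(len(t))                         # 2 -- the three-value row was skipped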
+ + Parameters + ---------- + table : `astropy.table.Table` + Input table for writing + + Returns + ------- + table : `astropy.table.Table` + Output table for writing + """ + return table + + def write_header(self, lines, meta): + self.header.write_comments(lines, meta) + self.header.write(lines) + + def write(self, table): + """ + Write ``table`` as list of strings. + + Parameters + ---------- + table : `~astropy.table.Table` + Input table data. + + Returns + ------- + lines : list + List of strings corresponding to ASCII table + + """ + + # Check column names before altering + self.header.cols = list(six.itervalues(table.columns)) + self.header.check_column_names(self.names, self.strict_names, False) + + # In-place update of columns in input ``table`` to reflect column + # filtering. Note that ``table`` is guaranteed to be a copy of the + # original user-supplied table. + _apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names) + + # This is a hook to allow updating the table columns after name + # filtering but before setting up to write the data. This is currently + # only used by ECSV and is otherwise just a pass-through. + table = self.update_table_data(table) + + # Now use altered columns + new_cols = list(six.itervalues(table.columns)) + # link information about the columns to the writer object (i.e. self) + self.header.cols = new_cols + self.data.cols = new_cols + self.header.table_meta = table.meta + + # Write header and data to lines list + lines = [] + self.write_header(lines, table.meta) + self.data.write(lines) + + return lines + + +class ContinuationLinesInputter(BaseInputter): + """Inputter where lines ending in ``continuation_char`` are joined + with the subsequent line. Example:: + + col1 col2 col3 + 1 \ + 2 3 + 4 5 \ + 6 + """ + + continuation_char = '\\' + replace_char = ' ' + # If no_continue is not None then lines matching this regex are not subject + # to line continuation. The initial use case here is Daophot. In this + # case the continuation character is just replaced with replace_char. + no_continue = None + + def process_lines(self, lines): + re_no_continue = re.compile(self.no_continue) if self.no_continue else None + + parts = [] + outlines = [] + for line in lines: + if re_no_continue and re_no_continue.match(line): + line = line.replace(self.continuation_char, self.replace_char) + if line.endswith(self.continuation_char): + parts.append(line.replace(self.continuation_char, self.replace_char)) + else: + parts.append(line) + outlines.append(''.join(parts)) + parts = [] + + return outlines + + +class WhitespaceSplitter(DefaultSplitter): + def process_line(self, line): + """Replace tab with space within ``line`` while respecting quoted substrings""" + newline = [] + in_quote = False + lastchar = None + for char in line: + if char == self.quotechar and (self.escapechar is None or + lastchar != self.escapechar): + in_quote = not in_quote + if char == '\t' and not in_quote: + char = ' ' + lastchar = char + newline.append(char) + + return ''.join(newline) + + +extra_reader_pars = ('Reader', 'Inputter', 'Outputter', + 'delimiter', 'comment', 'quotechar', 'header_start', + 'data_start', 'data_end', 'converters', 'encoding', + 'data_Splitter', 'header_Splitter', + 'names', 'include_names', 'exclude_names', 'strict_names', + 'fill_values', 'fill_include_names', 'fill_exclude_names') + + +def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs): + """Initialize a table reader allowing for common customizations. 
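The ``write`` path above assembles the header and data lines; the usual entry point is ``ascii.write`` (illustrative only, assuming astropy is importable)::

    import sys
    from astropy.io import ascii
    from astropy.table import Table

    t = Table([[1, 2], [3.0, 4.0]], names=['a', 'b'])
    # Header line then data rows, joined with the '|' delimiter
    ascii.write(t, sys.stdout, delimiter='|')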
See ui.get_reader() + for param docs. This routine is for internal (package) use only and is useful + because it depends only on the "core" module. + """ + + from .fastbasic import FastBasic + if issubclass(Reader, FastBasic): # Fast readers handle args separately + if Inputter is not None: + kwargs['Inputter'] = Inputter + return Reader(**kwargs) + + if 'fast_reader' in kwargs: + del kwargs['fast_reader'] # ignore fast_reader parameter for slow readers + reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars) + reader = Reader(**reader_kwargs) + + if Inputter is not None: + reader.inputter = Inputter() + + if Outputter is not None: + reader.outputter = Outputter() + + # Issue #855 suggested to set data_start to header_start + default_header_length + # Thus, we need to retrieve this from the class definition before resetting these numbers. + try: + default_header_length = reader.data.start_line - reader.header.start_line + except TypeError: # Start line could be None or an instancemethod + default_header_length = None + + if 'delimiter' in kwargs: + reader.header.splitter.delimiter = kwargs['delimiter'] + reader.data.splitter.delimiter = kwargs['delimiter'] + if 'comment' in kwargs: + reader.header.comment = kwargs['comment'] + reader.data.comment = kwargs['comment'] + if 'quotechar' in kwargs: + reader.header.splitter.quotechar = kwargs['quotechar'] + reader.data.splitter.quotechar = kwargs['quotechar'] + if 'data_start' in kwargs: + reader.data.start_line = kwargs['data_start'] + if 'data_end' in kwargs: + reader.data.end_line = kwargs['data_end'] + if 'header_start' in kwargs: + if (reader.header.start_line is not None): + reader.header.start_line = kwargs['header_start'] + # For FixedWidthTwoLine the data_start is calculated relative to the position line. + # However, position_line is given as absolute number and not relative to header_start. + # So, ignore this Reader here. + if (('data_start' not in kwargs) and (default_header_length is not None) + and reader._format_name not in ['fixed_width_two_line', 'commented_header']): + reader.data.start_line = reader.header.start_line + default_header_length + elif kwargs['header_start'] is not None: + # User trying to set a None header start to some value other than None + raise ValueError('header_start cannot be modified for this Reader') + if 'converters' in kwargs: + reader.outputter.converters = kwargs['converters'] + if 'data_Splitter' in kwargs: + reader.data.splitter = kwargs['data_Splitter']() + if 'header_Splitter' in kwargs: + reader.header.splitter = kwargs['header_Splitter']() + if 'names' in kwargs: + reader.names = kwargs['names'] + if 'include_names' in kwargs: + reader.include_names = kwargs['include_names'] + if 'exclude_names' in kwargs: + reader.exclude_names = kwargs['exclude_names'] + # Strict names is normally set only within the guessing process to + # indicate that column names cannot be numeric or have certain + # characters at the beginning or end. It gets used in + # BaseHeader.check_column_names(). 
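``_get_reader`` is wrapped by the public ``ascii.get_reader``; a sketch of building a reusable, pre-configured reader (assuming astropy is importable)::

    from astropy.io import ascii

    reader = ascii.get_reader(Reader=ascii.Basic, delimiter='|', comment=';')
    t = reader.read(['x|y', '; a comment line', '1|2'])
    print(t.colnames)                     # ['x', 'y']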
+ if 'strict_names' in kwargs: + reader.strict_names = kwargs['strict_names'] + if 'fill_values' in kwargs: + reader.data.fill_values = kwargs['fill_values'] + if 'fill_include_names' in kwargs: + reader.data.fill_include_names = kwargs['fill_include_names'] + if 'fill_exclude_names' in kwargs: + reader.data.fill_exclude_names = kwargs['fill_exclude_names'] + if 'encoding' in kwargs: + if six.PY2: + raise ValueError("the encoding parameter is not supported on " + "Python 2") + else: + reader.encoding = kwargs['encoding'] + reader.inputter.encoding = kwargs['encoding'] + + return reader + + +extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats', + 'strip_whitespace', + 'names', 'include_names', 'exclude_names', + 'fill_values', 'fill_include_names', + 'fill_exclude_names') + + +def _get_writer(Writer, fast_writer, **kwargs): + """Initialize a table writer allowing for common customizations. This + routine is for internal (package) use only and is useful because it depends + only on the "core" module. """ + + from .fastbasic import FastBasic + + # A value of None for fill_values imply getting the default string + # representation of masked values (depending on the writer class), but the + # machinery expects a list. The easiest here is to just pop the value off, + # i.e. fill_values=None is the same as not providing it at all. + if 'fill_values' in kwargs and kwargs['fill_values'] is None: + del kwargs['fill_values'] + + if issubclass(Writer, FastBasic): # Fast writers handle args separately + return Writer(**kwargs) + elif fast_writer and 'fast_{0}'.format(Writer._format_name) in FAST_CLASSES: + # Switch to fast writer + kwargs['fast_writer'] = fast_writer + return FAST_CLASSES['fast_{0}'.format(Writer._format_name)](**kwargs) + + writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars) + writer = Writer(**writer_kwargs) + + if 'delimiter' in kwargs: + writer.header.splitter.delimiter = kwargs['delimiter'] + writer.data.splitter.delimiter = kwargs['delimiter'] + if 'comment' in kwargs: + writer.header.write_comment = kwargs['comment'] + writer.data.write_comment = kwargs['comment'] + if 'quotechar' in kwargs: + writer.header.splitter.quotechar = kwargs['quotechar'] + writer.data.splitter.quotechar = kwargs['quotechar'] + if 'formats' in kwargs: + writer.data.formats = kwargs['formats'] + if 'strip_whitespace' in kwargs: + if kwargs['strip_whitespace']: + # Restore the default SplitterClass process_val method which strips + # whitespace. This may have been changed in the Writer + # initialization (e.g. Rdb and Tab) + writer.data.splitter.process_val = operator.methodcaller('strip') + else: + writer.data.splitter.process_val = None + if 'names' in kwargs: + writer.header.names = kwargs['names'] + if 'include_names' in kwargs: + writer.include_names = kwargs['include_names'] + if 'exclude_names' in kwargs: + writer.exclude_names = kwargs['exclude_names'] + if 'fill_values' in kwargs: + # Prepend user-specified values to the class default. 
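On the writer side, ``fill_values`` entries match the formatted string value of each cell. An illustrative sketch using a hypothetical ``-999`` sentinel (assuming astropy is importable)::

    import sys
    from astropy.io import ascii
    from astropy.table import Table

    t = Table([[1, 2], [3, -999]], names=['a', 'b'])
    # Cells whose string form is '-999' are written as an empty field
    ascii.write(t, sys.stdout, fill_values=[('-999', '')])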
+ with suppress(TypeError, IndexError): + # Test if it looks like (match, replace_string, optional_colname), + # in which case make it a list + kwargs['fill_values'][1] + '' + kwargs['fill_values'] = [kwargs['fill_values']] + writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values + if 'fill_include_names' in kwargs: + writer.data.fill_include_names = kwargs['fill_include_names'] + if 'fill_exclude_names' in kwargs: + writer.data.fill_exclude_names = kwargs['fill_exclude_names'] + return writer diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e969d1848cb0fbc772b2096715421620c58de3f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/core.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cparser.so b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cparser.so new file mode 100755 index 0000000000000000000000000000000000000000..73dec6b69eebf54dddf174b20ea675ffba91e1fa Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/cparser.so differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.py new file mode 100644 index 0000000000000000000000000000000000000000..00624526c524912c00a1c0ffa481f22f7089498f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.py @@ -0,0 +1,395 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +An extensible ASCII table reader and writer. + +Classes to read DAOphot table format + +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import re +import numpy as np +import itertools as itt +from collections import defaultdict, OrderedDict + +from . import core +from . import fixedwidth +from ...extern.six.moves import zip, map, range +from .misc import first_true_index, first_false_index, groupmore + + +class DaophotHeader(core.BaseHeader): + """ + Read the header from a file produced by the IRAF DAOphot routine. + """ + + comment = r'\s*#K' + + # Regex for extracting the format strings + re_format = re.compile(r'%-?(\d+)\.?\d?[sdfg]') + re_header_keyword = re.compile(r'[#]K' + r'\s+ (?P \w+)' + r'\s* = (?P .+) $', + re.VERBOSE) + aperture_values = () + + def __init__(self): + core.BaseHeader.__init__(self) + + def parse_col_defs(self, grouped_lines_dict): + """ + Parse a series of column definition lines like below. There may be several + such blocks in a single file (where continuation characters have already been + stripped). 
+ #N ID XCENTER YCENTER MAG MERR MSKY NITER + #U ## pixels pixels magnitudes magnitudes counts ## + #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d + """ + line_ids = ('#N', '#U', '#F') + coldef_dict = defaultdict(list) + + # Function to strip identifier lines + stripper = lambda s: s[2:].strip(' \\') + for defblock in zip(*map(grouped_lines_dict.get, line_ids)): + for key, line in zip(line_ids, map(stripper, defblock)): + coldef_dict[key].append(line.split()) + + # Save the original columns so we can use it later to reconstruct the + # original header for writing + if self.data.is_multiline: + # Database contains multi-aperture data. + # Autogen column names, units, formats from last row of column headers + last_names, last_units, last_formats = list(zip(*map(coldef_dict.get, line_ids)))[-1] + N_multiline = len(self.data.first_block) + for i in np.arange(1, N_multiline + 1).astype('U2'): + # extra column names eg. RAPERT2, SUM2 etc... + extended_names = list(map(''.join, zip(last_names, itt.repeat(i)))) + if i == '1': # Enumerate the names starting at 1 + coldef_dict['#N'][-1] = extended_names + else: + coldef_dict['#N'].append(extended_names) + coldef_dict['#U'].append(last_units) + coldef_dict['#F'].append(last_formats) + + # Get column widths from column format specifiers + get_col_width = lambda s: int(self.re_format.search(s).groups()[0]) + col_widths = [[get_col_width(f) for f in formats] + for formats in coldef_dict['#F']] + # original data format might be shorter than 80 characters and filled with spaces + row_widths = np.fromiter(map(sum, col_widths), int) + row_short = Daophot.table_width - row_widths + # fix last column widths + for w, r in zip(col_widths, row_short): + w[-1] += r + + self.col_widths = col_widths + + # merge the multi-line header data into single line data + coldef_dict = dict((k, sum(v, [])) for (k, v) in coldef_dict.items()) + + return coldef_dict + + def update_meta(self, lines, meta): + """ + Extract table-level keywords for DAOphot table. These are indicated by + a leading '#K ' prefix. 
+ """ + table_meta = meta['table'] + + # self.lines = self.get_header_lines(lines) + Nlines = len(self.lines) + if Nlines > 0: + # Group the header lines according to their line identifiers (#K, + # #N, #U, #F or just # (spacer line)) function that grabs the line + # identifier + get_line_id = lambda s: s.split(None, 1)[0] + + # Group lines by the line identifier ('#N', '#U', '#F', '#K') and + # capture line index + gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines))) + + # Groups of lines and their indices + grouped_lines, gix = zip(*groups) + + # Dict of line groups keyed by line identifiers + grouped_lines_dict = dict(zip(gid, grouped_lines)) + + # Update the table_meta keywords if necessary + if '#K' in grouped_lines_dict: + keywords = OrderedDict(map(self.extract_keyword_line, grouped_lines_dict['#K'])) + table_meta['keywords'] = keywords + + coldef_dict = self.parse_col_defs(grouped_lines_dict) + + line_ids = ('#N', '#U', '#F') + for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)): + meta['cols'][name] = {'unit': unit, + 'format': fmt} + + self.meta = meta + self.names = coldef_dict['#N'] + + def extract_keyword_line(self, line): + """ + Extract info from a header keyword line (#K) + """ + m = self.re_header_keyword.match(line) + if m: + vals = m.group('stuff').strip().rsplit(None, 2) + keyword_dict = {'units': vals[-2], + 'format': vals[-1], + 'value': (vals[0] if len(vals) > 2 else "")} + return m.group('name'), keyword_dict + + def get_cols(self, lines): + """ + Initialize the header Column objects from the table ``lines`` for a DAOphot + header. The DAOphot header is specialized so that we just copy the entire BaseHeader + get_cols routine and modify as needed. + + + + Parameters + ---------- + lines : list + List of table lines + + Returns + ---------- + col : list + List of table Columns + """ + + if not self.names: + raise core.InconsistentTableError('No column names found in DAOphot header') + + # Create the list of io.ascii column objects + self._set_cols_from_names() + + # Set unit and format as needed. + coldefs = self.meta['cols'] + for col in self.cols: + unit, fmt = map(coldefs[col.name].get, ('unit', 'format')) + if unit != '##': + col.unit = unit + if fmt != '##': + col.format = fmt + + # Set column start and end positions. + col_width = sum(self.col_widths, []) + ends = np.cumsum(col_width) + starts = ends - col_width + for i, col in enumerate(self.cols): + col.start, col.end = starts[i], ends[i] + col.span = col.end - col.start + if hasattr(col, 'format'): + if any(x in col.format for x in 'fg'): + col.type = core.FloatType + elif 'd' in col.format: + col.type = core.IntType + elif 's' in col.format: + col.type = core.StrType + + # INDEF is the missing value marker + self.data.fill_values.append(('INDEF', '0')) + + +class DaophotData(core.BaseData): + splitter_class = fixedwidth.FixedWidthSplitter + start_line = 0 + comment = r'\s*#' + + def __init__(self): + core.BaseData.__init__(self) + self.is_multiline = False + + def get_data_lines(self, lines): + + # Special case for multiline daophot databases. 
Extract the aperture + # values from the first multiline data block + if self.is_multiline: + # Grab the first column of the special block (aperture values) and + # recreate the aperture description string + aplist = next(zip(*map(str.split, self.first_block))) + self.header.aperture_values = tuple(map(float, aplist)) + + # Set self.data.data_lines to a slice of lines contain the data rows + core.BaseData.get_data_lines(self, lines) + + +class DaophotInputter(core.ContinuationLinesInputter): + + continuation_char = '\\' + multiline_char = '*' + replace_char = ' ' + re_multiline = re.compile(r'(#?)[^\\*#]*(\*?)(\\*) ?$') + + def search_multiline(self, lines, depth=150): + """ + Search lines for special continuation character to determine number of + continued rows in a datablock. For efficiency, depth gives the upper + limit of lines to search. + """ + + # The list of apertures given in the #K APERTURES keyword may not be + # complete!! This happens if the string description of the aperture + # list is longer than the field width of the #K APERTURES field. In + # this case we have to figure out how many apertures there are based on + # the file structure. + + comment, special, cont = zip(*(self.re_multiline.search(l).groups() + for l in lines[:depth])) + + # Find first non-comment line + data_start = first_false_index(comment) + + # No data in lines[:depth]. This may be because there is no data in + # the file, or because the header is really huge. If the latter, + # increasing the search depth should help + if data_start is None: + return None, None, lines[:depth] + + header_lines = lines[:data_start] + + # Find first line ending on special row continuation character '*' + # indexed relative to data_start + first_special = first_true_index(special[data_start:depth]) + if first_special is None: # no special lines + return None, None, header_lines + + # last line ending on special '*', but not on line continue '/' + last_special = first_false_index(special[data_start + first_special:depth]) + # index relative to first_special + + # if first_special is None: #no end of special lines within search + # depth! 
increase search depth return self.search_multiline( lines, + # depth=2*depth ) + + # indexing now relative to line[0] + markers = np.cumsum([data_start, first_special, last_special]) + # multiline portion of first data block + multiline_block = lines[markers[1]:markers[-1]] + + return markers, multiline_block, header_lines + + def process_lines(self, lines): + + markers, block, header = self.search_multiline(lines) + self.data.is_multiline = markers is not None + self.data.markers = markers + self.data.first_block = block + # set the header lines returned by the search as a attribute of the header + self.data.header.lines = header + + if markers is not None: + lines = lines[markers[0]:] + + continuation_char = self.continuation_char + multiline_char = self.multiline_char + replace_char = self.replace_char + + parts = [] + outlines = [] + for i, line in enumerate(lines): + mo = self.re_multiline.search(line) + if mo: + comment, special, cont = mo.groups() + if comment or cont: + line = line.replace(continuation_char, replace_char) + if special: + line = line.replace(multiline_char, replace_char) + if cont and not comment: + parts.append(line) + if not cont: + parts.append(line) + outlines.append(''.join(parts)) + parts = [] + else: + raise ValueError('multiline re could not match line ' + '{}: {}'.format(i, line)) + + return outlines + + +class Daophot(core.BaseReader): + """ + Read a DAOphot file. + Example:: + + #K MERGERAD = INDEF scaleunit %-23.7g + #K IRAF = NOAO/IRAFV2.10EXPORT version %-23s + #K USER = davis name %-23s + #K HOST = tucana computer %-23s + # + #N ID XCENTER YCENTER MAG MERR MSKY NITER \\ + #U ## pixels pixels magnitudes magnitudes counts ## \\ + #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d + # + #N SHARPNESS CHI PIER PERROR \\ + #U ## ## ## perrors \\ + #F %-23.3f %-12.3f %-6d %-13s + # + 14 138.538 INDEF 15.461 0.003 34.85955 4 \\ + -0.032 0.802 0 No_error + + The keywords defined in the #K records are available via the output table + ``meta`` attribute:: + + >>> import os + >>> from astropy.io import ascii + >>> filename = os.path.join(ascii.__path__[0], 'tests/t/daophot.dat') + >>> data = ascii.read(filename) + >>> for name, keyword in data.meta['keywords'].items(): + ... print(name, keyword['value'], keyword['units'], keyword['format']) + ... + MERGERAD INDEF scaleunit %-23.7g + IRAF NOAO/IRAFV2.10EXPORT version %-23s + USER name %-23s + ... + + The unit and formats are available in the output table columns:: + + >>> for colname in data.colnames: + ... col = data[colname] + ... print(colname, col.unit, col.format) + ... + ID None %-9d + XCENTER pixels %-10.3f + YCENTER pixels %-10.3f + ... + + Any column values of INDEF are interpreted as a missing value and will be + masked out in the resultant table. + + In case of multi-aperture daophot files containing repeated entries for the last + row of fields, extra unique column names will be created by suffixing + corresponding field names with numbers starting from 2 to N (where N is the + total number of apertures). + For example, + first aperture radius will be RAPERT and corresponding magnitude will be MAG, + second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2, + third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3, + and so on. 
+ + """ + _format_name = 'daophot' + _io_registry_format_aliases = ['daophot'] + _io_registry_can_write = False + _description = 'IRAF DAOphot format table' + + header_class = DaophotHeader + data_class = DaophotData + inputter_class = DaophotInputter + + table_width = 80 + + def __init__(self): + core.BaseReader.__init__(self) + # The inputter needs to know about the data (see DaophotInputter.process_lines) + self.inputter.data = self.data + + def write(self, table=None): + raise NotImplementedError diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0163d3b928e6f4fc5462531ffb9725e2e90c21d Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/daophot.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.py new file mode 100644 index 0000000000000000000000000000000000000000..7d5a59f79296b102d8c5e6ccebffcf2dd6a8e04f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.py @@ -0,0 +1,252 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and +writing all the meta data associated with an astropy Table object. +""" + +import re +from collections import OrderedDict +import contextlib + +from ...extern import six + +from . import core, basic +from ...table import meta, serialize +from ...utils.data_info import serialize_context_as + +__doctest_requires__ = {'Ecsv': ['yaml']} + +ECSV_VERSION = '0.9' +DELIMITERS = (' ', ',') + + +class EcsvHeader(basic.BasicHeader): + """Header class for which the column definition line starts with the + comment character. See the :class:`CommentedHeader` class for an example. + """ + def process_lines(self, lines): + """Return only non-blank lines that start with the comment regexp. For these + lines strip out the matching characters and leading/trailing whitespace.""" + re_comment = re.compile(self.comment) + for line in lines: + line = line.strip() + if not line: + continue + match = re_comment.match(line) + if match: + out = line[match.end():] + if out: + yield out + else: + # Stop iterating on first failed match for a non-blank line + return + + def write(self, lines): + """ + Write header information in the ECSV ASCII format. This format + starts with a delimiter separated list of the column names in order + to make this format readable by humans and simple csv-type readers. + It then encodes the full table meta and column attributes and meta + as YAML and pretty-prints this in the header. Finally the delimited + column names are repeated again, for humans and readers that look + for the *last* comment line as defining the column names. 
+ """ + if self.splitter.delimiter not in DELIMITERS: + raise ValueError('only space and comma are allowed for delimiter in ECSV format') + + for col in self.cols: + if len(getattr(col, 'shape', ())) > 1: + raise ValueError("ECSV format does not support multidimensional column '{0}'" + .format(col.info.name)) + + # Now assemble the header dict that will be serialized by the YAML dumper + header = {'cols': self.cols, 'schema': 'astropy-2.0'} + + if self.table_meta: + header['meta'] = self.table_meta + + # Set the delimiter only for the non-default option(s) + if self.splitter.delimiter != ' ': + header['delimiter'] = self.splitter.delimiter + + header_yaml_lines = (['%ECSV {0}'.format(ECSV_VERSION), + '---'] + + meta.get_yaml_from_header(header)) + + lines.extend([self.write_comment + line for line in header_yaml_lines]) + lines.append(self.splitter.join([x.info.name for x in self.cols])) + + def write_comments(self, lines, meta): + """ + Override the default write_comments to do nothing since this is handled + in the custom write method. + """ + pass + + def update_meta(self, lines, meta): + """ + Override the default update_meta to do nothing. This process is done + in get_cols() for this reader. + """ + pass + + def get_cols(self, lines): + """ + Initialize the header Column objects from the table ``lines``. + + Parameters + ---------- + lines : list + List of table lines + + """ + # Cache a copy of the original input lines before processing below + raw_lines = lines + + # Extract non-blank comment (header) lines with comment character stripped + lines = list(self.process_lines(lines)) + + # Validate that this is a ECSV file + ecsv_header_re = r"""%ECSV [ ] + (?P \d+) + \. (?P \d+) + \.? (?P \d+)? $""" + + no_header_msg = ('ECSV header line like "# %ECSV " not found as first line.' + ' This is required for a ECSV file.') + + if not lines: + raise core.InconsistentTableError(no_header_msg) + + match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE) + if not match: + raise core.InconsistentTableError(no_header_msg) + # ecsv_version could be constructed here, but it is not currently used. + + try: + header = meta.get_header_from_yaml(lines) + except meta.YamlParseError: + raise core.InconsistentTableError('unable to parse yaml in meta header') + + if 'meta' in header: + self.table_meta = header['meta'] + + if 'delimiter' in header: + delimiter = header['delimiter'] + if delimiter not in DELIMITERS: + raise ValueError('only space and comma are allowed for delimiter in ECSV format') + self.splitter.delimiter = delimiter + self.data.splitter.delimiter = delimiter + + # Create the list of io.ascii column objects from `header` + header_cols = OrderedDict((x['name'], x) for x in header['datatype']) + self.names = [x['name'] for x in header['datatype']] + + # Read the first non-commented line of table and split to get the CSV + # header column names. This is essentially what the Basic reader does. + header_line = next(super(EcsvHeader, self).process_lines(raw_lines)) + header_names = next(self.splitter([header_line])) + + # Check for consistency of the ECSV vs. CSV header column names + if header_names != self.names: + raise ValueError('column names from ECSV header {} do not ' + 'match names from header line of CSV data {}' + .format(self.names, header_names)) + + # BaseHeader method to create self.cols, which is a list of + # io.ascii.core.Column objects (*not* Table Column objects). 
+ self._set_cols_from_names() + + # Transfer attributes from the column descriptor stored in the input + # header YAML metadata to the new columns to create this table. + for col in self.cols: + for attr in ('description', 'format', 'unit', 'meta'): + if attr in header_cols[col.name]: + setattr(col, attr, header_cols[col.name][attr]) + col.dtype = header_cols[col.name]['datatype'] + # ECSV "string" means numpy dtype.kind == 'U' AKA str in Python 3 + if not six.PY2 and col.dtype == 'string': + col.dtype = 'str' + if col.dtype.startswith('complex'): + raise TypeError('ecsv reader does not support complex number types') + + +class EcsvOutputter(core.TableOutputter): + """ + After reading the input lines and processing, convert the Reader columns + and metadata to an astropy.table.Table object. This overrides the default + converters to be an empty list because there is no "guessing" of the + conversion function. + """ + default_converters = [] + + def __call__(self, cols, meta): + # Convert to a Table with all plain Column subclass columns + out = super(EcsvOutputter, self).__call__(cols, meta) + + # If mixin columns exist (based on the special '__mixin_columns__' + # key in the table ``meta``), then use that information to construct + # appropriate mixin columns and remove the original data columns. + # If no __mixin_columns__ exists then this function just passes back + # the input table. + out = serialize._construct_mixins_from_columns(out) + + return out + + +class Ecsv(basic.Basic): + """ + Read a file which conforms to the ECSV (Enhanced Character Separated + Values) format. This format allows for specification of key table + and column meta-data, in particular the data type and unit. For details + see: https://github.com/astropy/astropy-APEs/blob/master/APE6.rst. + + Examples + -------- + + >>> from astropy.table import Table + >>> ecsv_content = '''# %ECSV 0.9 + ... # --- + ... # datatype: + ... # - {name: a, unit: m / s, datatype: int64, format: '%03d'} + ... # - {name: b, unit: km, datatype: int64, description: This is column b} + ... a b + ... 001 2 + ... 004 3 + ... ''' + >>> Table.read(ecsv_content, format='ascii.ecsv') + + a b + m / s km + int64 int64 + ----- ----- + 001 2 + 004 3 + """ + _format_name = 'ecsv' + _description = 'Enhanced CSV' + + header_class = EcsvHeader + outputter_class = EcsvOutputter + + def update_table_data(self, table): + """ + Update table columns in place if mixin columns are present. + + This is a hook to allow updating the table columns after name + filtering but before setting up to write the data. This is currently + only used by ECSV and is otherwise just a pass-through. 
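A complementary write example: serializing a table with a unit and some metadata to ECSV emits the commented YAML header described above followed by the delimited data (a sketch; requires the optional ``yaml`` dependency)::

    import sys
    from astropy.table import Table

    t = Table([[1, 2]], names=['a'])
    t['a'].unit = 'm'
    t.meta['history'] = 'demo'
    t.write(sys.stdout, format='ascii.ecsv')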
+ + Parameters + ---------- + table : `astropy.table.Table` + Input table for writing + + Returns + ------- + table : `astropy.table.Table` + Output table for writing + """ + with serialize_context_as('ecsv'): + out = serialize._represent_mixins_as_columns(table) + return out diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.pyc new file mode 100644 index 0000000000000000000000000000000000000000..808e00a0cd421a28e7909b97feee1a6a916f4eca Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/ecsv.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.py new file mode 100644 index 0000000000000000000000000000000000000000..3cbe1f960a476a5e04c168726af6d7728c9836a9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.py @@ -0,0 +1,349 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +import re +from collections import OrderedDict + +from . import core +from ...extern import six +from ...table import Table +from . import cparser +from ...extern.six.moves import zip +from ...utils import set_locale + + +@six.add_metaclass(core.MetaBaseReader) +class FastBasic(object): + """ + This class is intended to handle the same format addressed by the + ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C + code and is therefore much faster. Unlike the other ASCII readers and + writers, this class is not very extensible and is restricted + by optimization requirements. + """ + _format_name = 'fast_basic' + _description = 'Basic table with custom delimiter using the fast C engine' + _fast = True + fill_extra_cols = False + guessing = False + strict_names = False + + def __init__(self, default_kwargs={}, **user_kwargs): + # Make sure user does not set header_start to None for a reader + # that expects a non-None value (i.e. a number >= 0). This mimics + # what happens in the Basic reader. + if (default_kwargs.get('header_start', 0) is not None and + user_kwargs.get('header_start', 0) is None): + raise ValueError('header_start cannot be set to None for this Reader') + + kwargs = default_kwargs.copy() + kwargs.update(user_kwargs) # user kwargs take precedence over defaults + delimiter = kwargs.pop('delimiter', ' ') + self.delimiter = str(delimiter) if delimiter is not None else None + self.write_comment = kwargs.get('comment', '# ') + self.comment = kwargs.pop('comment', '#') + if self.comment is not None: + self.comment = str(self.comment) + self.quotechar = str(kwargs.pop('quotechar', '"')) + self.header_start = kwargs.pop('header_start', 0) + # If data_start is not specified, start reading + # data right after the header line + data_start_default = user_kwargs.get('data_start', self.header_start + + 1 if self.header_start is not None else 1) + self.data_start = kwargs.pop('data_start', data_start_default) + self.kwargs = kwargs + self.strip_whitespace_lines = True + self.strip_whitespace_fields = True + + def _read_header(self): + # Use the tokenizer by default -- this method + # can be overridden for specialized headers + self.engine.read_header() + + def read(self, table): + """ + Read input data (file-like object, filename, list of strings, or + single string) into a Table and return the result. 
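Selecting the C engine from the public interface (illustrative only, assuming astropy is importable)::

    from astropy.io import ascii

    lines = ['a,b', '1,2', '3,4']
    # fast_reader=True routes this through FastCsv rather than the pure-Python reader
    t = ascii.read(lines, format='csv', fast_reader=True)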
+ """ + if self.comment is not None and len(self.comment) != 1: + raise core.ParameterError("The C reader does not support a comment regex") + elif self.data_start is None: + raise core.ParameterError("The C reader does not allow data_start to be None") + elif self.header_start is not None and self.header_start < 0 and \ + not isinstance(self, FastCommentedHeader): + raise core.ParameterError("The C reader does not allow header_start to be " + "negative except for commented-header files") + elif self.data_start < 0: + raise core.ParameterError("The C reader does not allow data_start to be negative") + elif len(self.delimiter) != 1: + raise core.ParameterError("The C reader only supports 1-char delimiters") + elif len(self.quotechar) != 1: + raise core.ParameterError("The C reader only supports a length-1 quote character") + elif 'converters' in self.kwargs: + raise core.ParameterError("The C reader does not support passing " + "specialized converters") + elif 'encoding' in self.kwargs: + raise core.ParameterError("The C reader does not use the encoding parameter") + elif 'Outputter' in self.kwargs: + raise core.ParameterError("The C reader does not use the Outputter parameter") + elif 'Inputter' in self.kwargs: + raise core.ParameterError("The C reader does not use the Inputter parameter") + elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs: + raise core.ParameterError("The C reader does not use a Splitter class") + + self.strict_names = self.kwargs.pop('strict_names', False) + + self.engine = cparser.CParser(table, self.strip_whitespace_lines, + self.strip_whitespace_fields, + delimiter=self.delimiter, + header_start=self.header_start, + comment=self.comment, + quotechar=self.quotechar, + data_start=self.data_start, + fill_extra_cols=self.fill_extra_cols, + **self.kwargs) + conversion_info = self._read_header() + self.check_header() + if conversion_info is not None: + try_int, try_float, try_string = conversion_info + else: + try_int = {} + try_float = {} + try_string = {} + + with set_locale('C'): + data, comments = self.engine.read(try_int, try_float, try_string) + + meta = OrderedDict() + if comments: + meta['comments'] = comments + return Table(data, names=list(self.engine.get_names()), meta=meta) + + def check_header(self): + names = self.engine.get_header_names() or self.engine.get_names() + if self.strict_names: + # Impose strict requirements on column names (normally used in guessing) + bads = [" ", ",", "|", "\t", "'", '"'] + for name in names: + if (core._is_number(name) or + len(name) == 0 or + name[0] in bads or + name[-1] in bads): + raise ValueError('Column name {0!r} does not meet strict name requirements' + .format(name)) + # When guessing require at least two columns + if self.guessing and len(names) <= 1: + raise ValueError('Strict name guessing requires at least two columns') + + def write(self, table, output): + """ + Use a fast Cython method to write table data to output, + where output is a filename or file-like object. 
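The corresponding write path (illustrative only, assuming astropy is importable)::

    import sys
    from astropy.io import ascii
    from astropy.table import Table

    t = Table([[1, 2], [3, 4]], names=['a', 'b'])
    # fast_writer=True (the default) hands the work to the Cython FastWriter
    ascii.write(t, sys.stdout, format='csv', fast_writer=True)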
+ """ + self._write(table, output, {}) + + def _write(self, table, output, default_kwargs, + header_output=True, output_types=False): + + write_kwargs = {'delimiter': self.delimiter, + 'quotechar': self.quotechar, + 'strip_whitespace': self.strip_whitespace_fields, + 'comment': self.write_comment + } + write_kwargs.update(default_kwargs) + # user kwargs take precedence over default kwargs + write_kwargs.update(self.kwargs) + writer = cparser.FastWriter(table, **write_kwargs) + writer.write(output, header_output, output_types) + + +class FastCsv(FastBasic): + """ + A faster version of the ordinary :class:`Csv` writer that uses the + optimized C parsing engine. Note that this reader will append empty + field values to the end of any row with not enough columns, while + :class:`FastBasic` simply raises an error. + """ + _format_name = 'fast_csv' + _description = 'Comma-separated values table using the fast C engine' + _fast = True + fill_extra_cols = True + + def __init__(self, **kwargs): + super(FastCsv, self).__init__({'delimiter': ',', 'comment': None}, **kwargs) + + def write(self, table, output): + """ + Override the default write method of `FastBasic` to + output masked values as empty fields. + """ + self._write(table, output, {'fill_values': [(core.masked, '')]}) + + +class FastTab(FastBasic): + """ + A faster version of the ordinary :class:`Tab` reader that uses + the optimized C parsing engine. + """ + _format_name = 'fast_tab' + _description = 'Tab-separated values table using the fast C engine' + _fast = True + + def __init__(self, **kwargs): + super(FastTab, self).__init__({'delimiter': '\t'}, **kwargs) + self.strip_whitespace_lines = False + self.strip_whitespace_fields = False + + +class FastNoHeader(FastBasic): + """ + This class uses the fast C engine to read tables with no header line. If + the names parameter is unspecified, the columns will be autonamed with + "col{}". + """ + _format_name = 'fast_no_header' + _description = 'Basic table with no headers using the fast C engine' + _fast = True + + def __init__(self, **kwargs): + super(FastNoHeader, self).__init__({'header_start': None, 'data_start': 0}, **kwargs) + + def write(self, table, output): + """ + Override the default writing behavior in `FastBasic` so + that columns names are not included in output. + """ + self._write(table, output, {}, header_output=None) + + +class FastCommentedHeader(FastBasic): + """ + A faster version of the :class:`CommentedHeader` reader, which looks for + column names in a commented line. ``header_start`` denotes the index of + the header line among all commented lines and is 0 by default. + """ + _format_name = 'fast_commented_header' + _description = 'Columns name in a commented line using the fast C engine' + _fast = True + + def __init__(self, **kwargs): + super(FastCommentedHeader, self).__init__({}, **kwargs) + # Mimic CommentedHeader's behavior in which data_start + # is relative to header_start if unspecified; see #2692 + if 'data_start' not in kwargs: + self.data_start = 0 + + def read(self, table): + """ + Read input data (file-like object, filename, list of strings, or + single string) into a Table and return the result. + """ + out = super(FastCommentedHeader, self).read(table) + + # Strip off the comment line set as the header line for + # commented_header format (first by default). 
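The ``FastCsv`` reader defined above sets ``fill_extra_cols = True``, so a row with too few values is padded with empty fields rather than rejected, as its docstring states. A minimal sketch with invented values:

    from astropy.io import ascii

    lines = ['a,b,c',
             '1,2,3',
             '4,5']          # short row: the last field is missing
    # The short row is padded instead of raising the error that
    # fast_basic would produce for a ragged table.
    tbl = ascii.read(lines, format='fast_csv')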
+ if 'comments' in out.meta: + idx = self.header_start + if idx < 0: + idx = len(out.meta['comments']) + idx + out.meta['comments'] = out.meta['comments'][:idx] + out.meta['comments'][idx+1:] + if not out.meta['comments']: + del out.meta['comments'] + + return out + + def _read_header(self): + tmp = self.engine.source + commented_lines = [] + + for line in tmp.splitlines(): + line = line.lstrip() + if line and line[0] == self.comment: # line begins with a comment + commented_lines.append(line[1:]) + if len(commented_lines) == self.header_start + 1: + break + + if len(commented_lines) <= self.header_start: + raise cparser.CParserError('not enough commented lines') + + self.engine.setup_tokenizer([commented_lines[self.header_start]]) + self.engine.header_start = 0 + self.engine.read_header() + self.engine.setup_tokenizer(tmp) + + def write(self, table, output): + """ + Override the default writing behavior in `FastBasic` so + that column names are commented. + """ + self._write(table, output, {}, header_output='comment') + + +class FastRdb(FastBasic): + """ + A faster version of the :class:`Rdb` reader. This format is similar to + tab-delimited, but it also contains a header line after the column + name line denoting the type of each column (N for numeric, S for string). + """ + _format_name = 'fast_rdb' + _description = 'Tab-separated with a type definition header line' + _fast = True + + def __init__(self, **kwargs): + super(FastRdb, self).__init__({'delimiter': '\t', 'data_start': 2}, **kwargs) + self.strip_whitespace_lines = False + self.strip_whitespace_fields = False + + def _read_header(self): + tmp = self.engine.source + line1 = '' + line2 = '' + for line in tmp.splitlines(): + # valid non-comment line + if not line1 and line.strip() and line.lstrip()[0] != self.comment: + line1 = line + elif not line2 and line.strip() and line.lstrip()[0] != self.comment: + line2 = line + break + else: # less than 2 lines in table + raise ValueError('RDB header requires 2 lines') + + # tokenize the two header lines separately + self.engine.setup_tokenizer([line2]) + self.engine.header_start = 0 + self.engine.read_header() + types = self.engine.get_names() + self.engine.setup_tokenizer([line1]) + self.engine.set_names([]) + self.engine.read_header() + + if len(self.engine.get_names()) != len(types): + raise ValueError('RDB header mismatch between number of ' + 'column names and column types') + + if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types): + raise ValueError('RDB type definitions do not all match ' + '[num](N|S): {0}'.format(types)) + + try_int = {} + try_float = {} + try_string = {} + + for name, col_type in zip(self.engine.get_names(), types): + if col_type[-1].lower() == 's': + try_int[name] = 0 + try_float[name] = 0 + try_string[name] = 1 + else: + try_int[name] = 1 + try_float[name] = 1 + try_string[name] = 0 + + self.engine.setup_tokenizer(tmp) + return (try_int, try_float, try_string) + + def write(self, table, output): + """ + Override the default writing behavior in `FastBasic` to + output a line with column types after the column name line. 
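``FastCommentedHeader``, whose reading logic is shown above, takes its column names from a commented line and then treats the first uncommented line as data. A short sketch, assuming the registered ``'fast_commented_header'`` format name and invented values:

    from astropy.io import ascii

    lines = ['# name value',   # column names live in the comment line
             '1.2 3.4',
             '5.6 7.8']
    tbl = ascii.read(lines, format='fast_commented_header')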
+ """ + self._write(table, output, {}, output_types=True) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b28e645484eb1bb9781a2de0cf6bc2bfe572cb7 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fastbasic.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.py new file mode 100644 index 0000000000000000000000000000000000000000..c98e973e66b6fdb4eb6760e43cda84823c88bde7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.py @@ -0,0 +1,406 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""An extensible ASCII table reader and writer. + +fixedwidth.py: + Read or write a table with fixed width columns. + +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +from ...extern.six.moves import zip, range + +from . import core +from .core import InconsistentTableError, DefaultSplitter +from . import basic + + +class FixedWidthSplitter(core.BaseSplitter): + """ + Split line based on fixed start and end positions for each ``col`` in + ``self.cols``. + + This class requires that the Header class will have defined ``col.start`` + and ``col.end`` for each column. The reference to the ``header.cols`` gets + put in the splitter object by the base Reader.read() function just in time + for splitting data lines by a ``data`` object. + + Note that the ``start`` and ``end`` positions are defined in the pythonic + style so line[start:end] is the desired substring for a column. This splitter + class does not have a hook for ``process_lines`` since that is generally not + useful for fixed-width input. + + """ + delimiter_pad = '' + bookend = False + delimiter = '|' + + def __call__(self, lines): + for line in lines: + vals = [line[x.start:x.end] for x in self.cols] + if self.process_val: + yield [self.process_val(x) for x in vals] + else: + yield vals + + def join(self, vals, widths): + pad = self.delimiter_pad or '' + delimiter = self.delimiter or '' + padded_delim = pad + delimiter + pad + if self.bookend: + bookend_left = delimiter + pad + bookend_right = pad + delimiter + else: + bookend_left = '' + bookend_right = '' + vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)] + return bookend_left + padded_delim.join(vals) + bookend_right + + +class FixedWidthHeaderSplitter(DefaultSplitter): + '''Splitter class that splits on ``|``.''' + delimiter = '|' + + +class FixedWidthHeader(basic.BasicHeader): + """ + Fixed width table header reader. 
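To round off the fast readers: ``FastRdb``, completed just above, expects a tab-separated column-name line followed by a type line of ``N``/``S`` flags before the data (compare the ``short.rdb`` test file later in this diff). A minimal sketch with invented values:

    from astropy.io import ascii

    lines = ['obsid\tinstrument',
             'N\tS',            # type line: numeric, string
             '1234\tACIS',
             '5678\tHRC']
    tbl = ascii.read(lines, format='fast_rdb')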
+ """ + splitter_class = FixedWidthHeaderSplitter + """ Splitter class for splitting data lines into columns """ + position_line = None # secondary header line position + """ row index of line that specifies position (default = 1) """ + set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'") + + def get_line(self, lines, index): + for i, line in enumerate(self.process_lines(lines)): + if i == index: + break + else: # No header line matching + raise InconsistentTableError('No header line found in table') + return line + + def get_cols(self, lines): + """ + Initialize the header Column objects from the table ``lines``. + + Based on the previously set Header attributes find or create the column names. + Sets ``self.cols`` with the list of Columns. + + Parameters + ---------- + lines : list + List of table lines + + """ + + # See "else" clause below for explanation of start_line and position_line + start_line = core._get_line_index(self.start_line, self.process_lines(lines)) + position_line = core._get_line_index(self.position_line, self.process_lines(lines)) + + # If start_line is none then there is no header line. Column positions are + # determined from first data line and column names are either supplied by user + # or auto-generated. + if start_line is None: + if position_line is not None: + raise ValueError("Cannot set position_line without also setting header_start") + data_lines = self.data.process_lines(lines) + if not data_lines: + raise InconsistentTableError( + 'No data lines found so cannot autogenerate column names') + vals, starts, ends = self.get_fixedwidth_params(data_lines[0]) + + self.names = [self.auto_format.format(i) + for i in range(1, len(vals) + 1)] + + else: + # This bit of code handles two cases: + # start_line = and position_line = None + # Single header line where that line is used to determine both the + # column positions and names. + # start_line = and position_line = + # Two header lines where the first line defines the column names and + # the second line defines the column positions + + if position_line is not None: + # Define self.col_starts and self.col_ends so that the call to + # get_fixedwidth_params below will use those to find the header + # column names. Note that get_fixedwidth_params returns Python + # slice col_ends but expects inclusive col_ends on input (for + # more intuitive user interface). + line = self.get_line(lines, position_line) + if len(set(line) - set([self.splitter.delimiter, ' '])) != 1: + raise InconsistentTableError('Position line should only contain delimiters and one other character, e.g. "--- ------- ---".') + # The line above lies. It accepts white space as well. + # We don't want to encourage using three different + # characters, because that can cause ambiguities, but white + # spaces are so common everywhere that practicality beats + # purity here. + charset = self.set_of_position_line_characters.union(set([self.splitter.delimiter, ' '])) + if not set(line).issubset(charset): + raise InconsistentTableError('Characters in position line must be part of {0}'.format(charset)) + vals, self.col_starts, col_ends = self.get_fixedwidth_params(line) + self.col_ends = [x - 1 if x is not None else None for x in col_ends] + + # Get the header column names and column positions + line = self.get_line(lines, start_line) + vals, starts, ends = self.get_fixedwidth_params(line) + + self.names = vals + + self._set_cols_from_names() + + # Set column start and end positions. 
+ for i, col in enumerate(self.cols): + col.start = starts[i] + col.end = ends[i] + + def get_fixedwidth_params(self, line): + """ + Split ``line`` on the delimiter and determine column values and + column start and end positions. This might include null columns with + zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or + ``header2_row = "----- ------- -----"``). The null columns are + stripped out. Returns the values between delimiters and the + corresponding start and end positions. + + Parameters + ---------- + line : str + Input line + + Returns + ------- + vals : list + List of values. + starts : list + List of starting indices. + ends : list + List of ending indices. + + """ + + # If column positions are already specified then just use those. + # If neither column starts or ends are given, figure out positions + # between delimiters. Otherwise, either the starts or the ends have + # been given, so figure out whichever wasn't given. + if self.col_starts is not None and self.col_ends is not None: + starts = list(self.col_starts) # could be any iterable, e.g. np.array + ends = [x + 1 if x is not None else None for x in self.col_ends] # user supplies inclusive endpoint + if len(starts) != len(ends): + raise ValueError('Fixed width col_starts and col_ends must have the same length') + vals = [line[start:end].strip() for start, end in zip(starts, ends)] + elif self.col_starts is None and self.col_ends is None: + # There might be a cleaner way to do this but it works... + vals = line.split(self.splitter.delimiter) + starts = [0] + ends = [] + for val in vals: + if val: + ends.append(starts[-1] + len(val)) + starts.append(ends[-1] + 1) + else: + starts[-1] += 1 + starts = starts[:-1] + vals = [x.strip() for x in vals if x] + if len(vals) != len(starts) or len(vals) != len(ends): + raise InconsistentTableError('Error parsing fixed width header') + else: + # exactly one of col_starts or col_ends is given... + if self.col_starts is not None: + starts = list(self.col_starts) + ends = starts[1:] + [None] # Assume each col ends where the next starts + else: # self.col_ends is not None + ends = [x + 1 for x in self.col_ends] + starts = [0] + ends[:-1] # Assume each col starts where the last ended + vals = [line[start:end].strip() for start, end in zip(starts, ends)] + + return vals, starts, ends + + def write(self, lines): + # Header line not written until data are formatted. Until then it is + # not known how wide each column will be for fixed width. + pass + + +class FixedWidthData(basic.BasicData): + """ + Base table data reader. 
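``get_fixedwidth_params`` above either infers column positions from the delimiters or uses user-supplied ``col_starts`` and ``col_ends``, with ``col_ends`` taken as inclusive. A sketch of the explicit-positions case through ``astropy.io.ascii.read``; the column layout and values are invented, and the keyword pass-through is assumed to reach the ``FixedWidth`` reader unchanged:

    from astropy.io import ascii

    lines = ['name    age',
             'Alice   42',
             'Bob     37']
    # col_ends are inclusive end positions, as noted in the comments above.
    tbl = ascii.read(lines, format='fixed_width',
                     col_starts=(0, 8), col_ends=(5, 10))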
+ """ + splitter_class = FixedWidthSplitter + """ Splitter class for splitting data lines into columns """ + + def write(self, lines): + vals_list = [] + col_str_iters = self.str_vals() + for vals in zip(*col_str_iters): + vals_list.append(vals) + + for i, col in enumerate(self.cols): + col.width = max([len(vals[i]) for vals in vals_list]) + if self.header.start_line is not None: + col.width = max(col.width, len(col.info.name)) + + widths = [col.width for col in self.cols] + + if self.header.start_line is not None: + lines.append(self.splitter.join([col.info.name for col in self.cols], + widths)) + + if self.header.position_line is not None: + char = self.header.position_char + if len(char) != 1: + raise ValueError('Position_char="{}" must be a single ' + 'character'.format(char)) + vals = [char * col.width for col in self.cols] + lines.append(self.splitter.join(vals, widths)) + + for vals in vals_list: + lines.append(self.splitter.join(vals, widths)) + + return lines + + +class FixedWidth(basic.Basic): + """ + Read or write a fixed width table with a single header line that defines column + names and positions. Examples:: + + # Bar delimiter in header and data + + | Col1 | Col2 | Col3 | + | 1.2 | hello there | 3 | + | 2.4 | many words | 7 | + + # Bar delimiter in header only + + Col1 | Col2 | Col3 + 1.2 hello there 3 + 2.4 many words 7 + + # No delimiter with column positions specified as input + + Col1 Col2Col3 + 1.2hello there 3 + 2.4many words 7 + + See the :ref:`fixed_width_gallery` for specific usage examples. + + """ + _format_name = 'fixed_width' + _description = 'Fixed width' + + header_class = FixedWidthHeader + data_class = FixedWidthData + + def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True): + super(FixedWidth, self).__init__() + self.data.splitter.delimiter_pad = delimiter_pad + self.data.splitter.bookend = bookend + self.header.col_starts = col_starts + self.header.col_ends = col_ends + + +class FixedWidthNoHeaderHeader(FixedWidthHeader): + '''Header reader for fixed with tables with no header line''' + start_line = None + + +class FixedWidthNoHeaderData(FixedWidthData): + '''Data reader for fixed width tables with no header line''' + start_line = 0 + + +class FixedWidthNoHeader(FixedWidth): + """ + Read or write a fixed width table which has no header line. Column + names are either input (``names`` keyword) or auto-generated. Column + positions are determined either by input (``col_starts`` and ``col_stops`` + keywords) or by splitting the first data line. In the latter case a + ``delimiter`` is required to split the data line. + + Examples:: + + # Bar delimiter in header and data + + | 1.2 | hello there | 3 | + | 2.4 | many words | 7 | + + # Compact table having no delimiter and column positions specified as input + + 1.2hello there3 + 2.4many words 7 + + This class is just a convenience wrapper around the ``FixedWidth`` reader + but with ``header.start_line = None`` and ``data.start_line = 0``. + + See the :ref:`fixed_width_gallery` for specific usage examples. 
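The ``FixedWidth`` docstring above shows a bar-delimited layout in which a single header line defines both names and positions; reading that layout back through the public interface is a one-liner. A minimal sketch reusing the docstring's own example data:

    from astropy.io import ascii

    lines = ['| Col1 | Col2        | Col3 |',
             '| 1.2  | hello there | 3    |',
             '| 2.4  | many words  | 7    |']
    tbl = ascii.read(lines, format='fixed_width')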
+ + """ + _format_name = 'fixed_width_no_header' + _description = 'Fixed width with no header' + header_class = FixedWidthNoHeaderHeader + data_class = FixedWidthNoHeaderData + + def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True): + super(FixedWidthNoHeader, self).__init__(col_starts, col_ends, + delimiter_pad=delimiter_pad, bookend=bookend) + + +class FixedWidthTwoLineHeader(FixedWidthHeader): + '''Header reader for fixed width tables splitting on whitespace. + + For fixed width tables with several header lines, there is typically + a white-space delimited format line, so splitting on white space is + needed. + ''' + splitter_class = DefaultSplitter + + +class FixedWidthTwoLineDataSplitter(FixedWidthSplitter): + '''Splitter for fixed width tables splitting on ``' '``.''' + delimiter = ' ' + + +class FixedWidthTwoLineData(FixedWidthData): + '''Data reader for fixed with tables with two header lines.''' + splitter_class = FixedWidthTwoLineDataSplitter + + +class FixedWidthTwoLine(FixedWidth): + """ + Read or write a fixed width table which has two header lines. The first + header line defines the column names and the second implicitly defines the + column positions. Examples:: + + # Typical case with column extent defined by ---- under column names. + + col1 col2 <== header_start = 0 + ----- ------------ <== position_line = 1, position_char = "-" + 1 bee flies <== data_start = 2 + 2 fish swims + + # Pretty-printed table + + +------+------------+ + | Col1 | Col2 | + +------+------------+ + | 1.2 | "hello" | + | 2.4 | there world| + +------+------------+ + + See the :ref:`fixed_width_gallery` for specific usage examples. + + """ + _format_name = 'fixed_width_two_line' + _description = 'Fixed width with second header line' + data_class = FixedWidthTwoLineData + header_class = FixedWidthTwoLineHeader + + def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False): + super(FixedWidthTwoLine, self).__init__(delimiter_pad=delimiter_pad, bookend=bookend) + self.header.position_line = position_line + self.header.position_char = position_char + self.data.start_line = position_line + 1 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b27334d1287162bee4cef2a7fdb3aa78ad528dab Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/fixedwidth.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/html.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/html.py new file mode 100644 index 0000000000000000000000000000000000000000..07524f16aedf62ab04e2ff65918df08ceeb20ceb --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/html.py @@ -0,0 +1,469 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""An extensible HTML table reader and writer. + +html.py: + Classes to read and write HTML tables + +`BeautifulSoup `_ +must be installed to read HTML tables. +""" + +from __future__ import absolute_import, division, print_function + +import warnings +import numpy + +from ...extern import six +from ...extern.six.moves import zip, range + +from . 
import core +from ...table import Column +from ...utils.xml import writer + +from copy import deepcopy + + +class SoupString(str): + """ + Allows for strings to hold BeautifulSoup data. + """ + + def __new__(cls, *args, **kwargs): + return str.__new__(cls, *args, **kwargs) + + def __init__(self, val): + self.soup = val + + +class ListWriter: + """ + Allows for XMLWriter to write to a list instead of a file. + """ + + def __init__(self, out): + self.out = out + + def write(self, data): + self.out.append(data) + + +def identify_table(soup, htmldict, numtable): + """ + Checks whether the given BeautifulSoup tag is the table + the user intends to process. + """ + + if soup is None or soup.name != 'table': + return False # Tag is not a
+ + elif 'table_id' not in htmldict: + return numtable == 1 + table_id = htmldict['table_id'] + + if isinstance(table_id, six.string_types): + return 'id' in soup.attrs and soup['id'] == table_id + elif isinstance(table_id, int): + return table_id == numtable + + # Return False if an invalid parameter is given + return False + + +class HTMLInputter(core.BaseInputter): + """ + Input lines of HTML in a valid form. + + This requires `BeautifulSoup + `_ to be installed. + """ + + def process_lines(self, lines): + """ + Convert the given input into a list of SoupString rows + for further processing. + """ + + try: + from bs4 import BeautifulSoup + except ImportError: + raise core.OptionalTableImportError('BeautifulSoup must be ' + 'installed to read HTML tables') + + if 'parser' not in self.html: + with warnings.catch_warnings(): + # Ignore bs4 parser warning #4550. + warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*') + soup = BeautifulSoup('\n'.join(lines)) + else: # use a custom backend parser + soup = BeautifulSoup('\n'.join(lines), self.html['parser']) + tables = soup.find_all('table') + for i, possible_table in enumerate(tables): + if identify_table(possible_table, self.html, i + 1): + table = possible_table # Find the correct table + break + else: + if isinstance(self.html['table_id'], int): + err_descr = 'number {0}'.format(self.html['table_id']) + else: + err_descr = "id '{0}'".format(self.html['table_id']) + raise core.InconsistentTableError( + 'ERROR: HTML table {0} not found'.format(err_descr)) + + # Get all table rows + soup_list = [SoupString(x) for x in table.find_all('tr')] + + return soup_list + + +class HTMLSplitter(core.BaseSplitter): + """ + Split HTML table data. + """ + + def __call__(self, lines): + """ + Return HTML data from lines as a generator. + """ + for line in lines: + if not isinstance(line, SoupString): + raise TypeError('HTML lines should be of type SoupString') + soup = line.soup + header_elements = soup.find_all('th') + if header_elements: + # Return multicolumns as tuples for HTMLHeader handling + yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan') + else el.text.strip() for el in header_elements] + data_elements = soup.find_all('td') + if data_elements: + yield [el.text.strip() for el in data_elements] + if len(lines) == 0: + raise core.InconsistentTableError('HTML tables must contain data ' + 'in a
tag') + + +class HTMLOutputter(core.TableOutputter): + """ + Output the HTML data as an ``astropy.table.Table`` object. + + This subclass allows for the final table to contain + multidimensional columns (defined using the colspan attribute + of ', + '', + '', + ''] + assert [str(x) for x in inputter.get_lines(table)] == expected + + # Should raise an InconsistentTableError if the table is not found + inputter.html = {'table_id': 4} + with pytest.raises(core.InconsistentTableError): + inputter.get_lines(table) + + # Identification by string ID + inputter.html['table_id'] = 'second' + expected = ['', + '', + '', + ''] + assert [str(x) for x in inputter.get_lines(table)] == expected + + # Identification by integer index + inputter.html['table_id'] = 3 + expected = ['', + '', + '', + ''] + assert [str(x) for x in inputter.get_lines(table)] == expected + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlsplitter(): + """ + Test to make sure that HTMLSplitter correctly inputs lines + of type SoupString to return a generator that gives all + header and data elements. + """ + + splitter = html.HTMLSplitter() + + lines = [html.SoupString(BeautifulSoup('
). + """ + + default_converters = [core.convert_numpy(numpy.int), + core.convert_numpy(numpy.float), + core.convert_numpy(numpy.str), + core.convert_numpy(numpy.unicode)] + + def __call__(self, cols, meta): + """ + Process the data in multidimensional columns. + """ + new_cols = [] + col_num = 0 + + while col_num < len(cols): + col = cols[col_num] + if hasattr(col, 'colspan'): + # Join elements of spanned columns together into list of tuples + span_cols = cols[col_num:col_num + col.colspan] + new_col = core.Column(col.name) + new_col.str_vals = list(zip(*[x.str_vals for x in span_cols])) + new_cols.append(new_col) + col_num += col.colspan + else: + new_cols.append(col) + col_num += 1 + + return super(HTMLOutputter, self).__call__(new_cols, meta) + + +class HTMLHeader(core.BaseHeader): + splitter_class = HTMLSplitter + + def start_line(self, lines): + """ + Return the line number at which header data begins. + """ + + for i, line in enumerate(lines): + if not isinstance(line, SoupString): + raise TypeError('HTML lines should be of type SoupString') + soup = line.soup + if soup.th is not None: + return i + + return None + + def _set_cols_from_names(self): + """ + Set columns from header names, handling multicolumns appropriately. + """ + self.cols = [] + new_names = [] + + for name in self.names: + if isinstance(name, tuple): + col = core.Column(name=name[0]) + col.colspan = int(name[1]) + self.cols.append(col) + new_names.append(name[0]) + for i in range(1, int(name[1])): + # Add dummy columns + self.cols.append(core.Column('')) + new_names.append('') + else: + self.cols.append(core.Column(name=name)) + new_names.append(name) + + self.names = new_names + + +class HTMLData(core.BaseData): + splitter_class = HTMLSplitter + + def start_line(self, lines): + """ + Return the line number at which table data begins. + """ + + for i, line in enumerate(lines): + if not isinstance(line, SoupString): + raise TypeError('HTML lines should be of type SoupString') + soup = line.soup + + if soup.td is not None: + if soup.th is not None: + raise core.InconsistentTableError('HTML tables cannot ' + 'have headings and data in the same row') + return i + + raise core.InconsistentTableError('No start line found for HTML data') + + def end_line(self, lines): + """ + Return the line number at which table data ends. + """ + last_index = -1 + + for i, line in enumerate(lines): + if not isinstance(line, SoupString): + raise TypeError('HTML lines should be of type SoupString') + soup = line.soup + if soup.td is not None: + last_index = i + + if last_index == -1: + return None + return last_index + 1 + + +class HTML(core.BaseReader): + """Read and write HTML tables. + + In order to customize input and output, a dict of parameters may + be passed to this class holding specific customizations. + + **htmldict** : Dictionary of parameters for HTML input/output. + + * css : Customized styling + If present, this parameter will be included in a + + + + + + + +
Column 1Column 2Column 3
1a1.05
2b2.75
3c-1.25
+ + + + + +
Column AColumn BColumn C
4d10.5
5e27.5
6f-12.5
+ + + + + +
C1C2C3
7g105.0
8h275.0
9i-125.0
+ + \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/html2.html b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/html2.html new file mode 100644 index 0000000000000000000000000000000000000000..50b1fee0632cf7f7f9e4ea0dba93f61bba485b4a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/html2.html @@ -0,0 +1,28 @@ + + + + + + + + +Row with no data elements + + + + + + + + + + + + + + + Some junk + +
AB
12.50000000000000000013
1a13.5
+ + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat new file mode 100644 index 0000000000000000000000000000000000000000..f7d51cd66ba324878bb6bd52f88d1b77f3555843 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat @@ -0,0 +1,12 @@ +\intval = 1 +\floatval=2.3e3 +\date = "Wed Sp 20 09:48:36 1995" +\key_continue = 'IPAC keywords ' +\key_continue = 'can continue across lines' +\ This is an example of a valid comment +| ra | dec | sai |-----v2---| sptype | +| real | real | int | real | char | +| unit | unit | unit | unit | ergs | +| null | null | -999 | null | -999 | + null 29.09056 -999 2.06000 -999 +12345678901234567890123456789012345678901234567890123456789012345 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.bz2 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a1dd3412aa5b3fb8578f614b6a647c6cb5e50251 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.bz2 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.xz b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.xz new file mode 100644 index 0000000000000000000000000000000000000000..cf06f78fef00e3b09b40bea086b5714bd3ca4b24 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/ipac.dat.xz differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex new file mode 100644 index 0000000000000000000000000000000000000000..cf65b3a61eb8074d0c802e36f49aace67232413a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex @@ -0,0 +1,10 @@ +\begin{table} +\caption{\ion{Ne}{ix} Ly series and \ion{Mg}{xi} triplet fluxes (errors are 5$1\sigma$ confidence intervals) \label{tab:nely}} +\begin{tabular}{lrr}\hline +cola & colb & colc\\ +\hline +a & 1 & 2\\ +b & 3 & 4\\ +\hline +\end{tabular} +\end{table} \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex.gz b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex.gz new file mode 100644 index 0000000000000000000000000000000000000000..da586f2bce2026669c2eb45b2f74423fab1d6339 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex1.tex.gz differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex2.tex b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex2.tex new file mode 100644 index 0000000000000000000000000000000000000000..f0d85ee16269d625661ba889a0f12770dc96933e --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex2.tex @@ -0,0 +1,14 @@ +\begin{deluxetable}{llrl} +%\tabletypesize{\scriptsize} +%\rotate +\tablecaption{Log of observations\label{tab:obslog}} +\tablewidth{0pt} +\tablehead{\colhead{Facility} & \colhead{Id} & \colhead{exposure} & 
\colhead{date}} + +\startdata +Chandra & \dataset[ADS/Sa.CXO#obs/06438]{ObsId 6438} & 23 ks & 2006-12-10\\ +Spitzer & AOR 3656448 & 41.6 s & 2004-06-09\\ +FLWO & filter: $B$ & 600 s & 2009-11-18\\ +\enddata + +\end{deluxetable} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex3.tex b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex3.tex new file mode 100644 index 0000000000000000000000000000000000000000..a8c78037948a83ade85fc0cc304b728ff025d1ad --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/latex3.tex @@ -0,0 +1,7 @@ +\begin{tabular}{lrr}\hline +cola & colb & colc\\ +\hline +a & 1 & 2\\ +b & 3 & 4\\ +\hline +\end{tabular} diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/nls1_stackinfo.dbout b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/nls1_stackinfo.dbout new file mode 100644 index 0000000000000000000000000000000000000000..015139fc00dd4064f5b4fc3e1b382c9b6309320a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/nls1_stackinfo.dbout @@ -0,0 +1,60 @@ + |objID |osrcid |xsrcid |SpecObjID |ra |dec |obsid |ccdid |z |modelMag_i |modelMagErr_i |modelMag_r |modelMagErr_r |expo |theta |rad_ecf_39 |detlim90 |fBlim90 +|-----------------------|-----------------|---------------|-----------------------|--------------------|--------------------|-----------|-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|-------------------- +| 277955213|S000.7044P00.7513|XS04861B6_005 | 10943136| 0.704453| 0.751336| 4861| 6| 0.086550| 15.462060| 0.003840| 16.063650| 0.003888| 5104.621261| 0.105533| 3.022382| 15.117712| 0.311318 +| 889974380|S002.9051P14.7003|XS03957B7_004 | 21189832| 2.905195| 14.700391| 3957| 7| 0.131820| 16.466050| 0.004807| 16.992690| 0.004917| 1479.207035| 0.118550| 3.016342| 17.364280| 0.880407 +| 661258793|S005.7709M01.1287|XS04079B7_003 | 10999832| 5.770986| -1.128731| 4079| 7| 0.166355| 17.232030| 0.008332| 17.549760| 0.007209| 1540.924685| 0.073783| 1.489627| 11.915912| 0.561011 +| 809266720|S006.9683P00.4376|XS04080B7_003 | 11027112| 6.968335| 0.437687| 4080| 7| 0.205337| 17.600880| 0.007790| 18.047560| 0.007439| 1373.690631| 0.073017| 1.489627| 15.480587| 0.807865 +| 275803698|S014.7729P00.1143|XS02179B6_001 | 11140928| 14.772956| 0.114358| 2179| 6| 0.718880| 17.487000| 0.006978| 17.441360| 0.005979| 2043.570572| 0.091283| 1.453126| 13.288200| 0.676781 +| 610324605|S029.2184M00.2061|XS04081B7_004 | 11365768| 29.218458| -0.206140| 4081| 7| 0.163040| 17.522280| 0.006957| 17.821940| 0.006828| 1513.497218| 0.073333| 1.489627| 12.188137| 0.580337 +| 819359440|S029.9901P00.5529|XS05777B1_005 | 11365080| 29.990162| 0.552903| 5777| 1| 0.311778| 18.508300| 0.013120| 18.822060| 0.011235| 16875.600510| 0.173000| 5.127182| 29.849694| 0.201770 +| 359375943|S037.1728P00.8690|XS04083B7_002 | 11478640| 37.172803| 0.869065| 4083| 7| 0.186225| 17.741220| 0.008360| 18.157300| 0.007994| 1600.672011| 0.074100| 1.489627| 12.060426| 0.546492 +| 680002094|S048.6144M01.1978|XS04084B7_001 | 11619072| 48.614411| -1.197867| 4084| 7| 0.387004| 18.084100| 0.008811| 18.047740| 0.007107| 1688.844386| 0.074850| 1.489627| 14.418508| 0.665490 +| 207476987|S122.0691P21.1492|XS03785B1_003 | 54178104| 122.069156| 
21.149206| 3785| 1| 0.142121| 18.795740| 0.014157| 19.272550| 0.014808| 15935.690359| 0.148833| 5.116525| 25.744492| 0.182462 +| 314622107|S124.9642P36.8307|XS04119B3_002 | 25158064| 124.964241| 36.830793| 4119| 3| 0.736540| 19.246110| 0.015329| 19.180730| 0.011400| 6686.525810| 0.191800| 7.738524| 30.212630| 0.663496 +| 499048612|S128.7287P55.5725|XS04940B7_008 | 50209680| 128.728748| 55.572530| 4940| 7| 0.241157| 16.196610| 0.006701| 16.845690| 0.008232| 85385.450431| 0.021583| 0.327020| 25.359343| 0.022349 +| 509872023|S130.2167P13.2152|NULL | 68308384| 130.216762| 13.215295| 2130| 7| 0.170352| 17.437750| 0.009265| 17.989470| 0.010459| 22105.895051| 0.010694| 0.232184| 8.703367| 0.030060 + | 337394906|S134.7069P27.8194|NULL | 54460872| 134.706929| 27.819409| 5821| 3| 0.090713| 15.495630| 0.004090| 15.933850| 0.004758| 20139.691101| 0.153217| 5.127182| 26.899264| 0.175787 +| 204612149|S140.7808P30.9906|XS04122B5_001 | 54657400| 140.780817| 30.990687| 4122| 5| 0.629145| 18.845160| 0.012765| 18.948480| 0.010724| 4173.745162| 0.192050| 7.739623| 37.336536| 0.819048 +| 731490396|S147.5151P17.1590|XS03274B2_001 | 66732256| 147.515194| 17.159084| 3274| 2| 0.195364| 17.472340| 0.006327| 17.783260| 0.006028| 14096.036370| 0.032833| 0.366684| 9.702502| 0.075172 +| 138368206|S147.6362P59.8164|NULL | 12773752| 147.636280| 59.816408| 3036| 2| 0.652411| 19.914220| 0.029210| 20.094790| 0.024926| 4012.606072| 0.167283| 5.127182| 21.727958| 0.697762 +| 561051767|S151.8587P12.8156|XS05606B7_004 | 49112864| 151.858761| 12.815617| 5606| 7| 0.240653| 15.175160| 0.004690| 15.348870| 0.004204| 33943.906753| 0.008806| 0.243602| 8.594830| 0.019169 +| 827223175|S153.3119M00.8760|XS04085B7_001 | 7622024| 153.311933| -0.876011| 4085| 7| 0.275749| 17.638600| 0.006945| 17.638410| 0.005750| 1769.308133| 0.074400| 1.489627| 12.371362| 0.512717 +| 125920375|S160.6255P01.0399|XS04086B7_004 | 7762256| 160.625571| 1.039913| 4086| 7| 0.115493| 16.476400| 0.006180| 16.952690| 0.005979| 1351.602676| 0.074017| 1.489627| 12.388117| 0.674828 +| 126051412|S160.8870P01.0191|XS04086B2_001 | 7762456| 160.887017| 1.019120| 4086| 2| 0.071893| 15.403520| 0.003958| 15.830900| 0.003798| 1470.312820| 0.188000| 7.763980| 24.843778| 2.297003 +| 199471676|S169.6261P40.4316|XS00868B3_001 | 40555520| 169.626193| 40.431669| 868| 3| 0.154596| 15.520440| 0.003612| 15.843520| 0.003574| 15875.864312| 0.039917| 0.409867| 10.331539| 0.069317 +| 911117410|S174.3501P30.0602|XS04161B7_011 | 62510944| 174.350159| 30.060294| 4161| 7| 0.695136| 19.910250| 0.032209| 20.022840| 0.021641| 14988.033880| 0.062233| 0.614561| 11.836136| 0.055041 +| 302536231|S179.8826P29.2455|XS00874B3_007 | 62651000| 179.882670| 29.245515| 874| 3| 0.724488| 17.965190| 0.007865| 18.090560| 0.007281| 94375.899791| 0.004833| 0.230755| 8.240796| 0.009124 +| 302601830|S179.9533P29.1580|XS00874B2_001 | 62623640| 179.953385| 29.158023| 874| 2| 0.083344| 15.802610| 0.004156| 16.238050| 0.004098| 84775.542942| 0.105917| 3.022382| 21.690631| 0.026736 +| 115261957|S180.7950P57.6803|XS05757B0_022 | 37008112| 180.795062| 57.680354| 5757| 0| 0.759025| 18.066390| 0.008409| 18.060240| 0.006947| 40390.627482| 0.178917| 5.125388| 34.575626| 0.096856 +| 607275593|S183.4289P02.8802|XS04934B3_004 | 14602336| 183.428996| 2.880256| 4934| 3| 0.641174| 19.083390| 0.016683| 19.264170| 0.014156| 17374.807609| 0.067033| 1.489627| 12.291123| 0.078403 +| 425979958|S183.5631P00.9198|XS04087B7_004 | 8100808| 183.563163| 0.919874| 4087| 7| 0.395653| 18.254720| 0.010882| 18.328170| 0.008583| 1743.376400| 
0.075183| 1.489627| 12.265030| 0.491269 +| 189855768|S184.4790P58.6599|XS03558B3_002 | 37036288| 184.479077| 58.659912| 3558| 3| 0.023181| 14.626880| 0.002469| 14.904420| 0.002339| 6129.941952| 0.003750| 0.232403| 7.666659| 0.136118 +| 619169285|S187.0751P44.2172|NULL | 38612200| 187.075137| 44.217228| 938| 0| 0.662250| 17.907240| 0.007109| 18.053730| 0.006975| 2298.154599| 0.172200| 5.125388| 20.352558| 0.848250 +| 325588542|S187.5646P03.0485|XS04040B7_001 | 14659784| 187.564673| 3.048508| 4040| 7| 0.137670| 16.402290| 0.004927| 17.103210| 0.005467| 3409.797684| 0.010833| 0.243602| 7.851051| 0.160035 +| 574503609|S187.6176P47.8825|NULL | 40921304| 187.617696| 47.882592| 3071| 3| 0.259120| 18.357610| 0.011731| 18.646700| 0.010925| 6222.550176| 0.197167| 7.763595| 30.051725| 0.668981 +| 101878322|S188.4820P13.0754|XS02107B7_001 | 45509408| 188.482006| 13.075423| 2107| 7| 0.480211| 18.623910| 0.015033| 19.178470| 0.015434| 5550.153407| 0.015417| 0.276499| 8.218379| 0.089948 +| 834099774|S188.5555P47.8975|XS03055B7_001 | 40921752| 188.555591| 47.897583| 3055| 7| 0.372812| 16.768010| 0.004767| 16.822040| 0.004038| 4452.821575| 0.009889| 0.243602| 8.075360| 0.119255 +| 528223925|S191.3095P01.1419|NULL | 8213952| 191.309592| 1.141912| 2974| 2| 0.091196| 16.407150| 0.006573| 16.882680| 0.006505| 5665.430694| 0.160700| 5.127182| 21.105785| 0.481827 +| 430960732|S194.9316P01.0486|XS04088B7_005 | 8269800| 194.931643| 1.048622| 4088| 7| 0.394569| 18.230550| 0.009419| 18.305880| 0.007674| 1532.367693| 0.075000| 1.489627| 11.697405| 0.529051 +| 040450702|S196.9301P46.7193|NULL | 41090688| 196.930172| 46.719346| 3244| 6| 0.600141| 19.711200| 0.021784| 20.631250| 0.030904| 8481.301760| 0.184333| 5.111493| 28.487300| 0.409704 +| 895335014|S197.7853P00.5310|XS04089B7_006 | 8297720| 197.785328| 0.531036| 4089| 7| 0.429236| 17.838440| 0.007412| 17.883200| 0.006128| 1342.669846| 0.075917| 1.489627| 11.975790| 0.622564 +| 362199556|S206.2204P00.0889|NULL | 8438656| 206.220450| 0.088956| 2251| 6| 0.087128| 15.878880| 0.003993| 16.339870| 0.003999| 7732.826167| 0.146900| 5.125388| 24.196409| 0.276815 +| 390308579|S213.1444M00.5833|XS04090B7_001 | 8550616| 213.144471| -0.583347| 4090| 7| 0.126940| 16.924460| 0.008082| 17.337560| 0.007611| 1850.463370| 0.074433| 1.489627| 12.129062| 0.475026 +| 444464848|S213.7065P36.2111|XS04163B1_002 | 46269424| 213.706536| 36.211187| 4163| 1| 0.180925| 17.916410| 0.009867| 18.346860| 0.009661| 81178.124219| 0.092700| 1.460516| 17.286353| 0.023684 +| 222587913|S216.7550P44.2825|XS06112B2_004 | 36276768| 216.755074| 44.282505| 6112| 2| 0.735436| 19.039310| 0.015654| 19.133000| 0.012307| 7202.822662| 0.137533| 3.019678| 17.903948| 0.270855 +| 929145428|S217.6259M00.1875|XS04091B7_004 | 8607176| 217.625904| -0.187530| 4091| 7| 0.103307| 17.334130| 0.007846| 17.791860| 0.007610| 1362.631234| 0.075700| 1.489627| 11.674970| 0.622522 +| 428847268|S217.6691P36.8177|XS04126B7_001 | 38894856| 217.669106| 36.817754| 4126| 7| 0.566053| 18.744800| 0.010372| 19.168410| 0.011316| 2834.413800| 0.009583| 0.243602| 7.405304| 0.178125 +| 440484921|S219.7460P03.5965|XS03290B1_006 | 16516928| 219.746065| 3.596520| 3290| 1| 0.733848| 18.461360| 0.009410| 18.429130| 0.008255| 48647.675049| 0.137250| 3.014202| 26.440169| 0.059182 +| 468047975|S222.3062P00.4019|XS04092B7_004 | 8691936| 222.306273| 0.401911| 4092| 7| 0.440801| 18.675470| 0.012882| 18.855400| 0.010686| 1574.467045| 0.052750| 0.607436| 10.031873| 0.470555 +| 468113483|S222.3862P00.3767|XS04092B7_001 | 8691984| 222.386270| 0.376752| 4092| 
7| 0.080563| 16.388650| 0.004431| 16.884420| 0.004493| 1920.873200| 0.074717| 1.489627| 12.712145| 0.488745 +| 931439168|S222.8459M00.1071|XS04093B7_001 | 8691960| 222.845909| -0.107191| 4093| 7| 0.138627| 17.058580| 0.006735| 17.488910| 0.006241| 1898.411113| 0.075567| 1.489627| 11.967944| 0.467229 +| 262643238|S235.8184P54.0905|XS00822B6_002 | 17361832| 235.818430| 54.090581| 822| 6| 0.245121| 17.540910| 0.006667| 17.778130| 0.006324| 3636.411721| 0.140183| 3.019678| 18.358903| 0.439665 +| 926158050|S240.8326P42.3631|NULL | 37599160| 240.832606| 42.363127| 5609| 6| 0.245845| 18.507290| 0.014520| 18.790040| 0.011571| 11325.862421| 0.133133| 3.019841| 18.982950| 0.171760 +| 608676499|S245.9044P31.1722|XS05607B7_001 | 39992048| 245.904431| 31.172231| 5607| 7| 0.235655| 18.073020| 0.012637| 18.487890| 0.011318| 16902.229821| 0.033283| 0.375642| 10.006948| 0.042823 +| 960066205|S246.4348P15.8271|XS03229B1_001 | 62172624| 246.434806| 15.827186| 3229| 1| 0.798335| 18.653970| 0.012212| 18.576130| 0.008949| 42261.803686| 0.138833| 3.014202| 25.770130| 0.068354 +| 134019205|S256.4454P63.1831|XS04094B7_002 | 9845344| 256.445484| 63.183108| 4094| 7| 0.119156| 17.496630| 0.006940| 17.887740| 0.006407| 1714.376381| 0.075200| 1.489627| 11.843095| 0.497777 +| 134609053|S257.3217P61.8895|XS04864B6_004 | 9902624| 257.321721| 61.889546| 4864| 6| 0.292492| 18.075020| 0.010429| 18.285270| 0.008331| 3266.690360| 0.109617| 3.019678| 16.988875| 0.540981 +| 213815608|S260.0418P26.6255|XS04361B3_014 | 27578480| 260.041831| 26.625566| 4361| 3| 0.159240| 14.936350| 0.003416| 15.449310| 0.003666| 23666.399953| 0.037933| 0.399900| 23.603727| 0.111820 +| 849702763|S264.1609P53.9090|XS04863B6_002 | 10155040| 264.160900| 53.909041| 4863| 6| 0.407487| 18.748560| 0.014215| 19.233860| 0.015527| 4649.657613| 0.107100| 3.022382| 16.279548| 0.369717 +| 801664702|S349.5880P00.4935|NULL | 10774344| 349.588069| 0.493526| 4938| 7| 0.376296| 18.852000| 0.018428| 19.022390| 0.013564| 28181.469589| 0.117017| 3.046228| 21.942945| 0.059519 +| 275333773|S354.7242P00.8034|XS04095B7_002 | 10859368| 354.724289| 0.803473| 4095| 7| 0.169759| 17.812580| 0.009228| 18.205800| 0.008355| 1513.825375| 0.075283| 1.489627| 12.057631| 0.593043 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_cds.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_cds.dat new file mode 100644 index 0000000000000000000000000000000000000000..dfe11a0444eb72498a9bcb7d539afed395fb9e81 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_cds.dat @@ -0,0 +1,37 @@ + + + + +Title: Spitzer Observations of NGC 1333: A Study of Structure and Evolution + in a Nearby Embedded Cluster +Authors: Gutermuth R.A., Myers P.C., Megeath S.T., Allen L.E., Pipher J.L., + Muzerolle J., Porras A., Winston E., Fazio G. 
+Table: Spitzer-identified YSOs: Addendum +================================================================================ +Byte-by-byte Description of file: datafile3.txt +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 3 I3 --- Index Running identification number + 5- 6 I2 h RAh Hour of Right Ascension (J2000) + 8- 9 I2 min RAm Minute of Right Ascension (J2000) + 11- 15 F5.2 s RAs Second of Right Ascension (J2000) + - continuation of description + 17 A1 --- DE- Sign of the Declination (J2000) + 18- 19 I2 deg DEd Degree of Declination (J2000) + 21- 22 I2 arcmin DEm Arcminute of Declination (J2000) + 24- 27 F4.1 arcsec DEs Arcsecond of Declination (J2000) + 29- 68 A40 --- Match Literature match + 70- 75 A6 --- Class Source classification (1) + 77-80 F4.2 mag AK ? The K band extinction (2) + 82-86 F5.2 --- Fit ? Fit of IRAC photometry (3) +-------------------------------------------------------------------------------- +Note (1): Asterisks mark "deeply embedded" sources with questionable IRAC + colors or incomplete IRAC photometry and relatively bright + MIPS 24 micron photometry. +Note (2): Only provided for sources with valid JHK_S_ photometry. +Note (3): Defined as the slope of the linear least squares fit to the + 3.6 - 8.0 micron SEDs in log{lambda} F_{lambda} vs log{lambda} space. + Extinction is not accounted for in these values. High extinction can + bias Fit to higher values. +-------------------------------------------------------------------------------- diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_daophot.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_daophot.dat new file mode 100644 index 0000000000000000000000000000000000000000..3fb49eff720f96acff9947288b4553d1a7301f10 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_daophot.dat @@ -0,0 +1,7 @@ +#K MERGERAD = INDEF scaleunit %-23.7g +#N ID XCENTER YCENTER MAG MERR MSKY NITER \ +#U ## pixels pixels magnitudes magnitudes counts ## \ +#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d +#N SHARPNESS CHI PIER PERROR \ +#U ## ## ## perrors \ +#F %-23.3f %-12.3f %-6d %-13s diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_ipac.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_ipac.dat new file mode 100644 index 0000000000000000000000000000000000000000..eecb4883012a08495bd6961f3acab1ee361dfc53 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_ipac.dat @@ -0,0 +1,10 @@ +\catalog = sao +\date = "Wed Sp 20 09:48:36 1995" +\mykeyword = 'Another way for defining keyvalue string' +\ This is an example of a valid comment. 
+\ The 2nd data line is used to verify the exact column parsing +\ (unclear if this is a valid for the IPAC format) +| ra | dec | sai |-----v2---| sptype | +| real | real | int | real | char | +| unit | unit | unit | unit | ergs | +| null | null | null | null | -999 | diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_sextractor.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_sextractor.dat new file mode 100644 index 0000000000000000000000000000000000000000..a9da39b1d773f5f4b0be097c42c0e61c43cbcf72 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_sextractor.dat @@ -0,0 +1,5 @@ +# 1 NUMBER Galaxy ID number +# 2 FLUX_ISO +# 3 FLUXERR_ISO +# 4 VALUES Note column 5 is missing +# 6 FLAG diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_with_header.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_with_header.dat new file mode 100644 index 0000000000000000000000000000000000000000..3774da60e546d90f9ed27bf587535765c992c4ad --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_with_header.dat @@ -0,0 +1 @@ +a b c diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_without_header.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_without_header.dat new file mode 100644 index 0000000000000000000000000000000000000000..0641385e1fc829c7e3fa82f3732d7b3da92c77f3 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/no_data_without_header.dat @@ -0,0 +1,2 @@ +# blank data table + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor.dat new file mode 100644 index 0000000000000000000000000000000000000000..59062194d6ef1b36ab670f803c5f9f38de553d06 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor.dat @@ -0,0 +1,8 @@ +# 1 NUMBER Galaxy ID number +# 2 FLUX_ISO +# 3 FLUXERR_ISO +# 4 VALU-ES Note column 5 is missing +# 6 FLAG +1 0.02580616000000000 0.03974229000000000 1.6770000000000000 0.2710000000000000 0 +2 5.72769100000000009 0.20643300000000001 2.6250000000000000 2.5219999999999998 0 +3 88.31933999999999685 0.59369850000000002 5.9249999999999998 4.7140000000000004 0 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor2.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor2.dat new file mode 100644 index 0000000000000000000000000000000000000000..679ea39c2ef6488e815554c10258a6a9c2a4a2ff --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor2.dat @@ -0,0 +1,14 @@ +# 1 NUMBER Running object number +# 2 XWIN_IMAGE Windowed position estimate along x [pixel] +# 3 YWIN_IMAGE Windowed position estimate along y [pixel] +# 4 MAG_AUTO Kron-like elliptical aperture magnitude [mag] +# 5 MAGERR_AUTO RMS error for AUTO magnitude [mag] +# 6 FLAGS Extraction flags +# 7 X2_IMAGE [pixel**2] +# 8 X_MAMA Barycenter position along MAMA x axis [m**(-6)] +# 9 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)] +1 100.523 11.911 -5.3246 0.0416 19 
1000.0 0.00304 -3.498 +2 100.660 4.872 -6.4538 0.0214 27 1500.0 0.00908 1.401 +3 131.046 10.382 -4.6836 0.0524 17 500.0 0.01004 2.512 +4 338.959 4.966 -7.1747 0.0173 25 1200.0 0.00792 2.901 +5 166.280 3.956 -4.0865 0.0621 25 800.0 0.00699 -6.489 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor3.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor3.dat new file mode 100644 index 0000000000000000000000000000000000000000..51adb21c3c3dcbd21194229a1d54a321529ce22b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/sextractor3.dat @@ -0,0 +1,10 @@ +# 1 X_IMAGE Object position along x [pixel] +# 2 Y_IMAGE [pixel] +# 3 ALPHA_J2000 Right ascension of barycenter (J2000) [deg] +# 4 DELTA_J2000 Declination of barycenter (J2000) [deg] +# 5 MAG_AUTO Kron-like elliptical aperture magnitude [mag] +# 6 MAGERR_AUTO RMS error for AUTO magnitude [mag] +# 7 MAG_APER Fixed aperture magnitude vector [mag] +# 14 MAGERR_APER RMS error vector for fixed aperture mag. [mag] + 1367.000 184.404 265.1445228 +68.7507679 22.9929 0.2218 24.1804 23.4541 22.9567 22.5162 22.1912 21.5363 21.0361 0.3262 0.2675 0.2203 0.1856 0.1683 0.1621 0.1673 + 1380.235 189.444 265.1384412 +68.7516124 20.9258 0.0569 22.2374 21.5987 21.2943 21.1244 20.9838 20.6672 20.0695 0.0645 0.0497 0.0495 0.0520 0.0533 0.0602 0.0515 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb new file mode 100644 index 0000000000000000000000000000000000000000..29f300d0feb31e727d23f6c525f7f988792526e1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb @@ -0,0 +1,14 @@ + +# blank lines + +agasc_id n_noids n_obs +N N N +115345072 1 1 + # comment +335416352 3 8 +266612160 1 1 +645803280 1 1 +117309912 1 1 +114950920 1 1 +335025040 2 24 + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.bz2 b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..9e6c49586d7fb2431a85a80bed6ab19f5892743b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.bz2 differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.gz b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.gz new file mode 100644 index 0000000000000000000000000000000000000000..92172ff323db945d85debb97ccba8c8639d9f130 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.gz differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.xz b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.xz new file mode 100644 index 0000000000000000000000000000000000000000..93faba9853444db60d0f4a0597923b097d288600 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.rdb.xz differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.tab b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.tab new file mode 
100644 index 0000000000000000000000000000000000000000..66e3e7f45acb3e56e9dacf88c740050cb3aac826 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/short.tab @@ -0,0 +1,8 @@ +agasc_id n_noids n_obs +115345072 1 1 +335416352 3 8 +266612160 1 1 +645803280 1 1 +117309912 1 1 +114950920 1 1 +335025040 2 24 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple.txt new file mode 100644 index 0000000000000000000000000000000000000000..d0a4e27cdc0908f5aff0124612add78fb60d5882 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple.txt @@ -0,0 +1,4 @@ + 'test 1a' test2 test3 test4 + # fun1 fun2 fun3 fun4 fun5 + top1 top2 top3 top4 +hat1 hat2 hat3 hat4 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple2.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple2.txt new file mode 100644 index 0000000000000000000000000000000000000000..0fbb9f6ee33c3bce34955f75d8b53c5c8df0aa8d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple2.txt @@ -0,0 +1,4 @@ +obsid | redshift | X | Y | object | rad +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 0.22 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple3.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple3.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab6e6577c48c92ea98e09ff15411aaa20e2dab0f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple3.txt @@ -0,0 +1,3 @@ +obsid|redshift|X|Y|object|rad +877|0.22|4378|3892|'Sou,rce82'|12.5 +3102|0.32|4167|4085|Q1250+568-A|9 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple4.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple4.txt new file mode 100644 index 0000000000000000000000000000000000000000..62e922aaf7b9d9572e65621ba2bc5a8462e94fb7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple4.txt @@ -0,0 +1,3 @@ +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 0.22 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple5.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple5.txt new file mode 100644 index 0000000000000000000000000000000000000000..771840414019dc5feba0bfb386400fad72c494d9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple5.txt @@ -0,0 +1,4 @@ +# Purposely make an ill-formed data file (in last row) +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv.csv b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv.csv new file mode 100644 index 0000000000000000000000000000000000000000..efb98239f9acc030f98b2cd1957ce7c9b4b9f2c3 --- /dev/null 
+++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv.csv @@ -0,0 +1,3 @@ +a,b,c +1,2,3 +4,5,6 \ No newline at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv_missing.csv b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv_missing.csv new file mode 100644 index 0000000000000000000000000000000000000000..9c87d2d85b631f8e7fcfcab4b3c710d184eb78a4 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/simple_csv_missing.csv @@ -0,0 +1,3 @@ +a,b,c +1 +4,5,6 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_blank_lines.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_blank_lines.txt new file mode 100644 index 0000000000000000000000000000000000000000..b096c4f72c4ff7d5e9e14055c0096056b996424d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_blank_lines.txt @@ -0,0 +1,8 @@ +obsid offset x y name oaa + +3102 0.32 4167 4085 Q1250+568-A 9 +3102 0.32 4706 3916 Q1250+568-B 14 +877 0.22 4378 3892 "Source 82" 12.5 + + + diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_header.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_header.dat new file mode 100644 index 0000000000000000000000000000000000000000..f8cfc69220c5452d83f7859b7b39c4cbe7568855 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_header.dat @@ -0,0 +1,2 @@ +1 3.4 hello +2 6.4 world diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_names.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_names.dat new file mode 100644 index 0000000000000000000000000000000000000000..2c3f803432c8491b72e08c9bf72c4f3e3bc84316 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/space_delim_no_names.dat @@ -0,0 +1,2 @@ +1 2 +3 4 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test4.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test4.dat new file mode 100644 index 0000000000000000000000000000000000000000..329d5f4c121ed13a24c94814ba21b98b84a358e8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test4.dat @@ -0,0 +1,12 @@ +# whitespace separated +zabs1.nh p1.gamma p1.ampl statname statval + 0.0872113431031 1.26764500000 0.000699751823872 input 0.0 +0.0863775314648 1.26769713012 0.000698799851356 chi2constvar 494.396534577 +0.0839710433091 1.25997502704 0.000696444029148 chi2modvar 497.56468441 +0.0867933991271 1.27045571779 0.000699526507899 cash -579508.340504 + # comment here +0.0913252611282 1.28738450369 0.000703999531569 chi2gehrels 416.904139981 +0.0943815607455 1.29839188657 0.000708725775733 chi2datavar 572.734008 +0.0943792771442 1.29837677223 0.00070871697621 chi2xspecvar 572.734013473 +0.0867953584196 1.27046735536 0.000699532088738 cstat 512.433488994 +0.0846479114132 1.26584338176 0.000697063608605 chi2constvar 440.651434041 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test5.dat 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test5.dat new file mode 100644 index 0000000000000000000000000000000000000000..316c4ff27f35c6adf02491d24f643dc5c8e1b132 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/test5.dat @@ -0,0 +1,22 @@ +# whitespace separated with lines to skip +------------------------------------------ +zabs1.nh p1.gamma p1.ampl statname statval +------------------------------------------ +0.095196313612 1.29238107724 0.000709438701165 chi2xspecvar 455.385700456 +0.0898827896112 1.27317260145 0.000703680688865 cstat 450.402806957 +0.0845373292976 1.26032264432 0.000697817633266 chi2constvar 427.888401816 +0.0813955290921 1.25278166998 0.000694773889339 chi2modvar 422.655226097 +0.0837813193374 1.26108631851 0.000697168659777 cash -582096.060739 +0.0877788113875 1.27498889089 0.000700963122261 chi2gehrels 336.255262001 +0.0886095763534 1.27831934755 0.000702152760295 chi2datavar 427.87097831 +0.0886062881606 1.27831561342 0.000702152575029 chi2xspecvar 427.870972282 +0.0837839157029 1.26109967845 0.000697177275745 cstat 423.869897301 +0.0848856095291 1.26216881055 0.000697245258092 chi2constvar 495.692552206 +0.0834040516574 1.25034791909 0.000694504650678 chi2modvar 448.488349352 +0.0863275923367 1.25920642303 0.000697302969088 cash -581109.867406 +0.0910593842926 1.27434931431 0.000701687557965 chi2gehrels 362.107884887 +0.0925984360666 1.27857224315 0.000703586368322 chi2datavar 467.653055046 +0.0926057133247 1.27858701992 0.000703594356786 chi2xspecvar 467.653060082 +0.0863257498551 1.259192667 0.000697300429366 cstat 451.536967896 +0.0880503692681 1.2588289844 0.000698437310968 chi2constvar 439.513117058 +0.0852962921333 1.25214407357 0.000696223065852 chi2modvar 443.456904712 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/ReadMe b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/ReadMe new file mode 100644 index 0000000000000000000000000000000000000000..b85d8d5af4ab5071b2aff2d8607939c67097f1e9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/ReadMe @@ -0,0 +1,89 @@ +J/A+A/511/A56 Abundances of five open clusters (Pancino+, 2010) +================================================================================ +Chemical abundance analysis of the open clusters Cr 110, NGC 2420, NGC 7789, +and M 67 (NGC 2682). + Pancino E., Carrera R., Rossetti, E., Gallart C. + + =2010A&A...511A..56P +================================================================================ +ADC_Keywords: Clusters, open ; Stars, giant ; Equivalent widths ; Spectroscopy +Keywords: stars: abundances - Galaxy: disk - + open clusters and associations: general + +Abstract: + The present number of Galactic open clusters that have high resolution + abundance determinations, not only of [Fe/H], but also of other key + elements, is largely insufficient to enable a clear modeling of the + Galactic disk chemical evolution. To increase the number of Galactic + open clusters with high quality measurements, we obtained high + resolution (R~30000), high quality (S/N~50-100 per pixel), echelle + spectra with the fiber spectrograph FOCES, at Calar Alto, Spain, for + three red clump stars in each of five Open Clusters. 
We used the + classical equivalent width analysis method to obtain accurate + abundances of sixteen elements: Al, Ba, Ca, Co, Cr, Fe, La, Mg, Na, + Nd, Ni, Sc, Si, Ti, V, and Y. We also derived the oxygen abundance + using spectral synthesis of the 6300{AA} forbidden line. + +Description: + Atomic data and equivalent widths for 15 red clump giants in 5 open + clusters: Cr 110, NGC 2099, NGC 2420, M 67, NGC 7789. + +File Summary: +-------------------------------------------------------------------------------- + FileName Lrecl Records Explanations +-------------------------------------------------------------------------------- +ReadMe 80 . This file +table1.dat 103 15 Observing logs and programme stars information +table5.dat 56 5265 Atomic data and equivalent widths +-------------------------------------------------------------------------------- + +See also: + J/A+A/455/271 : Abundances of red giants in NGC 6441 (Gratton+, 2006) + J/A+A/464/953 : Abundances of red giants in NGC 6441 (Gratton+, 2007) + J/A+A/505/117 : Abund. of red giants in 15 globular clusters (Carretta+, 2009) + +Byte-by-byte Description of file: table1.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 7 A7 --- Cluster Cluster name + 9- 12 I4 --- Star Star number within the cluster + 14- 15 I2 h RAh Right ascension (J2000) + 17- 18 I2 min RAm Right ascension (J2000) + 20- 23 F4.1 s RAs Right ascension (J2000) + 25 A1 --- DE- Declination sign (J2000) + 26- 27 I2 deg DEd Declination (J2000) + 29- 30 I2 arcmin DEm Declination (J2000) + 32- 35 F4.1 arcsec DEs Declination (J2000) + 37- 41 F5.2 mag Bmag B magnitude + 43- 47 F5.2 mag Vmag V magnitude + 49- 53 F5.2 mag Icmag ?=- Cousins I magnitude + 55- 59 F5.2 mag Rmag ?=- R magnitude + 61- 65 F5.2 mag Ksmag Ks magnitude + 67 I1 --- NExp Number of exposures + 69- 73 I5 s TExp Total exposure time + 75- 77 I3 --- S/N Signal-to-nois ratio + 79-103 A25 --- SName Simbad name +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: table5.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 7 A7 --- Cluster Cluster name + 9- 12 I4 --- Star Star number within the cluster + 14- 20 F7.2 0.1nm Wave Wavelength in Angstroms + 22- 23 A2 --- El Element name + 24 I1 --- ion Ionization stage (1 for neutral element) + 26- 30 F5.2 eV chiEx Excitation potential + 32- 37 F6.2 --- loggf Logarithm of the oscillator strength + 39- 43 F5.1 0.1pm EW ?=-9.9 Equivalent width (in mA) + 46- 49 F4.1 0.1pm e_EW ?=-9.9 rms uncertainty on EW + 51- 56 F6.3 --- Q ?=-9.999 DAOSPEC quality parameter Q + (large values are bad) +-------------------------------------------------------------------------------- + +Acknowledgements: + Elena Pancino, elena.pancino(at)oabo.inaf.it +================================================================================ +(End) Elena Pancino [INAF-OABo, Italy], Patricia Vannier [CDS] 23-Nov-2009 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table1.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table1.dat new file mode 100644 index 
0000000000000000000000000000000000000000..42d97d02ae1d31ca8efa10ba49e12ad6b256a24d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table1.dat @@ -0,0 +1,15 @@ +Cr110 2108 06 38 52.5 +02 01 58.4 14.79 13.35 -- --- 9.76 6 16200 70 Cl* Collinder 110 DI 2108 +Cr110 2129 06 38 41.1 +02 01 05.5 15.00 13.66 12.17 12.94 10.29 7 18900 70 Cl* Collinder 110 DI 2129 +Cr110 3144 06 38 30.3 +02 03 03.0 14.80 13.49 12.04 12.72 10.19 6 16195 65 Cl* Collinder 110 DI 3144 +NGC2099 67 05 52 16.6 +32 34 45.6 12.38 11.12 9.87 --- 8.17 3 3600 95 NGC 2099 67 +NGC2099 148 05 52 08.1 +32 30 33.1 12.36 11.09 - --- 8.05 3 3600 105 NGC 2099 148 +NGC2099 508 05 52 33.2 +32 27 43.5 12.24 10.98 -- --- 7.92 3 3900 85 NGC 2099 508 +NGC2420 41 07 38 06.2 +21 36 54.7 13.75 12.67 11.61 12.13 10.13 5 9000 70 NGC 2420 41 +NGC2420 76 07 38 15.5 +21 38 01.8 13.65 12.66 11.65 12.14 10.31 5 9000 75 NGC 2420 76 +NGC2420 174 07 38 26.9 +21 38 24.8 13.41 12.40 ---- --- 9.98 5 9000 60 NGC 2420 174 +NGC2682 141 08 51 22.8 +11 48 01.7 11.59 10.48 9.40 9.92 7.92 3 2700 85 Cl* NGC 2682 MMU 141 +NGC2682 223 08 51 43.9 +11 56 42.3 11.68 10.58 9.50 10.02 8.00 3 2700 85 Cl* NGC 2682 MMU 223 +NGC2682 286 08 52 18.6 +11 44 26.3 11.53 10.47 9.43 9.93 7.92 3 2700 105 Cl* NGC 2682 MMU 286 +NGC7789 5237 23 56 50.6 +56 49 20.9 13.92 12.81 11.52 --- 9.89 5 9000 70 Cl* NGC 7789 G 5237 +NGC7789 7840 23 57 19.3 +56 40 51.5 14.03 12.82 11.49 --- 9.83 6 9000 75 Cl* NGC 7789 G 7840 +NGC7789 8556 23 57 27.6 +56 45 39.2 14.18 12.97 11.65 --- 10.03 3 5400 45 Cl* NGC 7789 G 8556 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table5.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table5.dat new file mode 100644 index 0000000000000000000000000000000000000000..61bc51a8af0d0b172afaa840f95a81c63c7dfc76 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vizier/table5.dat @@ -0,0 +1,49 @@ +Cr110 2108 6696.79 Al1 4.02 -1.42 29.5 2.2 0.289 +Cr110 2108 6698.67 Al1 3.14 -1.65 58.0 2.0 0.325 +Cr110 2108 7361.57 Al1 4.02 -0.90 44.1 4.0 0.510 +Cr110 2108 7362.30 Al1 4.02 -0.75 62.7 3.9 0.577 +Cr110 2108 7835.31 Al1 4.02 -0.65 73.7 6.6 0.539 +Cr110 2108 7836.13 Al1 4.02 -0.49 87.6 4.1 0.390 +Cr110 2108 8772.86 Al1 4.02 -0.32 87.6 5.1 0.957 +Cr110 2108 8773.90 Al1 4.02 -0.16 118.6 14.6 0.736 +Cr110 2108 5853.67 Ba2 0.60 -1.00 121.9 5.5 1.435 +Cr110 2108 6141.71 Ba2 0.70 -0.08 191.0 8.7 1.117 +Cr110 2108 6496.90 Ba2 0.60 -0.38 175.8 6.8 1.473 +Cr110 2108 5261.70 Ca1 2.52 -0.59 149.1 5.3 0.808 +Cr110 2108 5512.98 Ca1 2.93 -0.71 106.7 6.2 1.416 +Cr110 2108 5857.45 Ca1 2.93 0.26 163.8 19.8 2.209 +Cr110 2108 6156.02 Ca1 2.52 -2.50 42.0 4.0 0.617 +Cr110 2108 6166.44 Ca1 2.52 -1.16 110.7 3.3 1.046 +Cr110 2108 6169.04 Ca1 2.52 -0.80 127.3 5.5 1.604 +Cr110 2108 6169.56 Ca1 2.53 -0.53 148.2 6.0 1.419 +Cr110 2108 6471.66 Ca1 2.53 -0.65 130.4 5.0 1.431 +Cr110 2108 6499.65 Ca1 2.52 -0.72 129.0 5.4 1.183 +Cr110 2108 5230.20 Co1 1.74 -1.84 60.4 6.7 1.210 +Cr110 2108 5530.77 Co1 1.71 -2.06 73.2 4.3 1.005 +Cr110 2108 5590.72 Co1 2.04 -1.87 69.9 3.2 0.706 +Cr110 2108 5935.38 Co1 1.88 -2.68 33.0 4.4 0.665 +Cr110 2108 6429.91 Co1 2.14 -2.41 28.2 1.3 0.340 +Cr110 2108 6490.34 Co1 2.04 -2.52 33.6 3.5 0.323 +Cr110 2108 6632.43 Co1 2.28 -2.00 50.9 2.1 0.391 +Cr110 2108 7154.67 Co1 2.04 -2.42 45.9 1.9 0.280 +Cr110 2108 7388.69 Co1 2.72 -1.65 36.6 1.8 0.343 +Cr110 2108 7417.37 Co1 2.04 -2.07 71.4 1.9 
0.369 +Cr110 2108 7838.13 Co1 3.97 -0.30 32.7 2.7 0.495 +Cr110 2108 5243.36 Cr1 3.40 -0.57 47.9 4.0 0.828 +Cr110 2108 5329.14 Cr1 2.91 -0.06 110.4 4.9 1.113 +Cr110 2108 5442.37 Cr1 3.42 -1.06 33.3 2.5 0.499 +Cr110 2108 5712.75 Cr1 3.01 -1.30 49.4 5.3 1.038 +Cr110 2108 5788.39 Cr1 3.01 -1.83 26.1 1.3 0.260 +Cr110 2108 5844.59 Cr1 3.01 -1.76 26.2 3.9 0.863 +Cr110 2108 6330.09 Cr1 0.94 -2.92 94.4 6.6 1.638 +Cr110 2108 6537.93 Cr1 1.00 -4.07 33.0 2.4 0.479 +Cr110 2108 6630.01 Cr1 1.03 -3.56 60.7 1.5 0.232 +Cr110 2108 6661.08 Cr1 4.19 -0.19 33.5 6.4 0.627 +Cr110 2108 7355.94 Cr1 2.89 -0.28 126.7 4.1 0.671 +Cr110 2108 5055.99 Fe1 4.31 -2.01 41.2 3.3 0.371 +Cr110 2108 5178.80 Fe1 4.39 -1.84 45.4 7.1 0.851 +Cr110 2108 5285.13 Fe1 4.43 -1.64 50.1 5.2 0.607 +Cr110 2108 5294.55 Fe1 3.64 -2.86 -9.9 -9.9 -9.999 +Cr110 2108 5295.31 Fe1 4.42 -1.69 38.3 9.5 1.958 +Cr110 2108 5373.71 Fe1 4.47 -0.86 91.5 5.3 1.416 +Cr110 2108 5386.33 Fe1 4.15 -1.77 55.9 6.6 0.949 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vots_spec.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vots_spec.dat new file mode 100644 index 0000000000000000000000000000000000000000..bc90130f057c24be195e76b3fa4b3d0df818bbb8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/vots_spec.dat @@ -0,0 +1,99 @@ +#################################################################################### +## +## VOTable-Simple Specification +## +## This is the specification of the VOTable-Simple (VOTS) format, given as an +## example data table with comments and references. This data table format is +## intented to provide a way of specifying metadata and data for simple tabular +## data sets. This specification is intended as a subset of the VOTable data +## model and allow easy generation of a VOTable-compliant data structure. This +## provides a uniform starting point for generating table documentation and +## performing database table creation and ingest. +## +## A python application is available which uses the STILTS java package to +## convert from a VOTS format to any of the (many) output formats supported by +## STILTS. This application can also generate a documentation file (in +## reStructured Text format) or a Django model definition from a VOTS table. +## +## Key VOTable and STILTS references: +## Full spec: http://www.ivoa.net/Documents/latest/VOT.html +## Datatypes: http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC11 +## FIELD def: http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC25 +## STILTS : http://www.star.bris.ac.uk/~mbt/stilts/ +## +## The VOTable-Simple format consists of header information followed by the tabular +## data elements. The VOTS header lines are all preceded by a single '#' character. +## Comments are preceded by '##' at the beginning of a line. +## +## The VOTS header defines the metadata associated with the table. In the +## VOTable-Simple format words in all CAPS (followed by ::) refer to the +## corresponding metadata elements in the VOTable specification. For instance +## the DESCRIPTION:: keyword precedes the lines that are used in the VOTable +## element. The COOSYS::, PARAM::, and FIELD:: keywords are +## each followed by a whitespace-delimited table that defines the corresponding +## VOTable elements and attributes. +## +## The actual table data must follow the header and consist of space or tab delimited +## data fields. 
The chosen delimiter must be used consistently throughout the table. +## +##---------------------------------------------------------------------------------- +## Table description, corresponding to the VOTable TABLE::DESCRIPTION element. +##---------------------------------------------------------------------------------- +# DESCRIPTION:: +# This is a sample table that shows a proposed format for generation of tables +# for the C-COSMOS collaboration. This format is compatible with simple 'awk' or +# S-mongo style processing but also allows full self-documentation and conversion +# to more robust data formats (FITS, VOTable, postgres database ingest, etc). +# +##---------------------------------------------------------------------------------- +## Coordinate system specification COOSYS. This is a "future" feature, as the +## current conversion code does not use this field. +##---------------------------------------------------------------------------------- +# COOSYS:: +# ID equinox epoch system +# J2000 J2000. J2000. eq_FK5 +# +##---------------------------------------------------------------------------------- +## Set the TABLE::PARAM values, which are values that apply for the entire table. +##---------------------------------------------------------------------------------- +# PARAM:: +# name datatype value description +# version string 1.1 'Table version' +# date string 2007/12/01 'Table release date' +# +##---------------------------------------------------------------------------------- +## Define the column names via the FIELD element. The attributes 'name', +## 'datatype', 'unit', and 'description' are required. Optional attributes are: +## 'width', 'precision', 'ucd', 'utype', 'ref', and 'type'. +## See http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC25 for +## the VOTable definitions. +## Allowed values of datatype are: +## boolean, unsignedByte, short, int, long, string, float, double +## Units: (from http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#sec:unit) +## The quantities in a column of the table may be expressed in some physical +## unit, which is specified by the unit attribute of the FIELD. The syntax of +## the unit string is defined in reference [3]; it is basically written as a +## string without blanks or spaces, where the symbols . or * indicate a +## multiplication, / stands for the division, and no special symbol is +## required for a power. Examples are unit="m2" for m2, unit="cm-2.s-1.keV-1" +## for cm-2s-1keV-1, or unit="erg/s" for erg s-1. The references [3] provide +## also the list of the valid symbols, which is essentially restricted to the +## Systeme International (SI) conventions, plus a few astronomical extensions +## concerning units used for time, angular, distance and energy measurements. +##---------------------------------------------------------------------------------- +# FIELD:: +# name datatype unit ucd description +# id int '' 'meta.id' 'C-COSMOS short identifier number' +# name string '' '' 'C-COSMOS long identifier name' +# ra double deg 'meta.cryptic' 'Right Ascension' +# dec double deg '' Declination +# flux float erg/cm2/s '' Flux +# +##---------------------------------------------------------------------------------- +## Now the actual field data in the order specified by the FIELD:: list. +## The data fields can be separated by tabs or spaces. If using spaces, +## any fields that contain a space must be enclosed in single quotes.
+## +12 'CXOCS J193423+022312' 150.01212 2.52322 1.21e-13 +13 'CXOCS J193322+024444' 150.02323 2.54444 1.21e-14 +14 'CXOCS J195555+025555' 150.04444 2.55555 1.21e-15 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/whitespace.dat b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/whitespace.dat new file mode 100644 index 0000000000000000000000000000000000000000..903c6cdc0fd85282b3fc3a99a4669dd73ddbe930 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/t/whitespace.dat @@ -0,0 +1,3 @@ + "quoted colname with tab inside" col2 col3 +val1 "val2 with tab" 2 + val3 val4 3 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..089269faa82f8d53ceb6de477ad4263daeefdb19 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.py @@ -0,0 +1,1151 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +try: + from cStringIO import StringIO +except ImportError: # cStringIO doesn't exist in Python 3 + from io import BytesIO + StringIO = lambda x: BytesIO(x.encode('ascii')) + +import os +import functools + +from textwrap import dedent + +import pytest +import numpy as np +from numpy import ma + +from ....table import Table, MaskedColumn +from ... import ascii +from ...ascii.core import ParameterError, FastOptionsError +from ...ascii.cparser import CParserError +from ..fastbasic import FastBasic, FastCsv, FastTab, FastCommentedHeader, \ + FastRdb, FastNoHeader +from .common import assert_equal, assert_almost_equal, assert_true +from ....extern import six +from ....extern.six.moves import range + +TRAVIS = os.environ.get('TRAVIS', False) + + +def assert_table_equal(t1, t2, check_meta=False): + assert_equal(len(t1), len(t2)) + assert_equal(t1.colnames, t2.colnames) + if check_meta: + assert_equal(t1.meta, t2.meta) + for name in t1.colnames: + if len(t1) != 0: + assert_equal(t1[name].dtype.kind, t2[name].dtype.kind) + if not isinstance(t1[name], MaskedColumn): + for i, el in enumerate(t1[name]): + try: + if not isinstance(el, six.string_types) and np.isnan(el): + assert_true(not isinstance(t2[name][i], six.string_types) and np.isnan(t2[name][i])) + elif isinstance(el, six.string_types): + assert_equal(el, t2[name][i]) + else: + assert_almost_equal(el, t2[name][i]) + except (TypeError, NotImplementedError): + pass # ignore for now + + +# Use this counter to create a unique filename for each file created in a test +# if this function is called more than once in a single test +_filename_counter = 0 + + +def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs): + # make sure we have a newline so table can't be misinterpreted as a filename + global _filename_counter + + table += '\n' + reader = Reader(**kwargs) + t1 = reader.read(table) + t2 = reader.read(StringIO(table)) + t3 = reader.read(table.splitlines()) + t4 = ascii.read(table, format=format, guess=False, **kwargs) + t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs) + assert_table_equal(t1, t2, check_meta=check_meta) + assert_table_equal(t2, t3, check_meta=check_meta) + assert_table_equal(t3, t4, check_meta=check_meta) + assert_table_equal(t4, t5, check_meta=check_meta) + + if parallel: + if 
TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + elif os.name == 'nt': + pytest.xfail("Multiprocessing is currently unsupported on Windows") + t6 = ascii.read(table, format=format, guess=False, fast_reader={ + 'parallel': True}, **kwargs) + assert_table_equal(t1, t6, check_meta=check_meta) + + filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter))) + _filename_counter += 1 + + with open(filename, 'wb') as f: + f.write(table.encode('ascii')) + f.flush() + + t7 = ascii.read(filename, format=format, guess=False, **kwargs) + if parallel: + t8 = ascii.read(filename, format=format, guess=False, fast_reader={ + 'parallel': True}, **kwargs) + + assert_table_equal(t1, t7, check_meta=check_meta) + if parallel: + assert_table_equal(t1, t8, check_meta=check_meta) + return t1 + + +@pytest.fixture(scope='function') +def read_basic(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic') + + +@pytest.fixture(scope='function') +def read_csv(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv') + + +@pytest.fixture(scope='function') +def read_tab(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastTab, format='tab') + + +@pytest.fixture(scope='function') +def read_commented_header(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastCommentedHeader, + format='commented_header') + + +@pytest.fixture(scope='function') +def read_rdb(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb') + + +@pytest.fixture(scope='function') +def read_no_header(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastNoHeader, + format='no_header') + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_simple_data(parallel, read_basic): + """ + Make sure the fast reader works with basic input data. + """ + table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +def test_read_types(): + """ + Make sure that the read() function takes filenames, + strings, and lists of strings in addition to file-like objects. + """ + t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False) + # TODO: also read from file + t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False) + t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False) + assert_table_equal(t1, t2) + assert_table_equal(t2, t3) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_supplied_names(parallel, read_basic): + """ + If passed as a parameter, names should replace any + column names found in the header. + """ + table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_header(parallel, read_basic, read_no_header): + """ + The header should not be read when header_start=None. Unless names is + passed, the column names should be auto-generated. 
+ """ + # Cannot set header_start=None for basic format + with pytest.raises(ValueError): + read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel) + + t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3')) + assert_table_equal(t2, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_header_supplied_names(parallel, read_basic, read_no_header): + """ + If header_start=None and names is passed as a parameter, header + data should not be read and names should be used instead. + """ + table = read_no_header("A B C\n1 2 3\n4 5 6", + names=('X', 'Y', 'Z'), parallel=parallel) + expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_comment(parallel, read_basic): + """ + Make sure that line comments are ignored by the C reader. + """ + table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_empty_lines(parallel, read_basic): + """ + Make sure that empty lines are ignored by the C reader. + """ + table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_lstrip_whitespace(parallel, read_basic): + """ + Test to make sure the reader ignores whitespace at the beginning of fields. + """ + text = """ + 1, 2, \t3 + A,\t\t B, C + a, b, c +""" + ' \n' + + table = read_basic(text, delimiter=',', parallel=parallel) + expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_rstrip_whitespace(parallel, read_basic): + """ + Test to make sure the reader ignores whitespace at the end of fields. + """ + text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n' + table = read_basic(text, delimiter=',', parallel=parallel) + expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_conversion(parallel, read_basic): + """ + The reader should try to convert each column to ints. If this fails, the + reader should try to convert to floats. Failing this, it should fall back + to strings. + """ + text = """ +A B C D E +1 a 3 4 5 +2. 1 9 10 -5.3e4 +4 2 -12 .4 six +""" + table = read_basic(text, parallel=parallel) + assert_equal(table['A'].dtype.kind, 'f') + assert table['B'].dtype.kind in ('S', 'U') + assert_equal(table['C'].dtype.kind, 'i') + assert_equal(table['D'].dtype.kind, 'f') + assert table['E'].dtype.kind in ('S', 'U') + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_delimiter(parallel, read_basic): + """ + Make sure that different delimiters work as expected. 
+ """ + text = """ +COL1 COL2 COL3 +1 A -1 +2 B -2 +""" + expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3')) + + for sep in ' ,\t#;': + table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_include_names(parallel, read_basic): + """ + If include_names is not None, the parser should read only those columns in include_names. + """ + table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel) + expected = Table([[1, 5], [4, 8]], names=('A', 'D')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_exclude_names(parallel, read_basic): + """ + If exclude_names is not None, the parser should exclude the columns in exclude_names. + """ + table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel) + expected = Table([[2, 6], [3, 7]], names=('B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_include_exclude_names(parallel, read_basic): + """ + Make sure that include_names is applied before exclude_names if both are specified. + """ + text = """ +A B C D E F G H +1 2 3 4 5 6 7 8 +9 10 11 12 13 14 15 16 +""" + table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'], + exclude_names=['B', 'F'], parallel=parallel) + expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_quoted_fields(parallel, read_basic): + """ + The character quotechar (default '"') should denote the start of a field which can + contain the field delimiter and newlines. + """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = """ +"A B" C D +1.5 2.1 -37.1 +a b " c + d" +""" + table = read_basic(text, parallel=parallel) + expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D')) + assert_table_equal(table, expected) + table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("key,val", [ + ('delimiter', ',,'), # multi-char delimiter + ('comment', '##'), # multi-char comment + ('data_start', None), # data_start=None + ('data_start', -1), # data_start negative + ('quotechar', '##'), # multi-char quote signifier + ('header_start', -1), # negative header_start + ('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters + ('Inputter', ascii.ContinuationLinesInputter), # passing Inputter + ('header_Splitter', ascii.DefaultSplitter), # passing Splitter + ('data_Splitter', ascii.DefaultSplitter)]) +def test_invalid_parameters(key, val): + """ + Make sure the C reader raises an error if passed parameters it can't handle. 
+ """ + with pytest.raises(ParameterError): + FastBasic(**{key: val}).read('1 2 3\n4 5 6') + with pytest.raises(ParameterError): + ascii.read('1 2 3\n4 5 6', + format='fast_basic', guess=False, **{key: val}) + + +def test_invalid_parameters_other(): + with pytest.raises(TypeError): + FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument + with pytest.raises(FastOptionsError): # don't fall back on the slow reader + ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7}) + with pytest.raises(ParameterError): + # Outputter cannot be specified in constructor + FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6') + + +def test_too_many_cols1(): + """ + If a row contains too many columns, the C reader should raise an error. + """ + text = """ +A B C +1 2 3 +4 5 6 +7 8 9 10 +11 12 13 +""" + with pytest.raises(CParserError) as e: + table = FastBasic().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 3 of data' in str(e) + + +def test_too_many_cols2(): + text = """\ +aaa,bbb +1,2, +3,4, +""" + with pytest.raises(CParserError) as e: + table = FastCsv().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 1 of data' in str(e) + + +def test_too_many_cols3(): + text = """\ +aaa,bbb +1,2,, +3,4, +""" + with pytest.raises(CParserError) as e: + table = FastCsv().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 1 of data' in str(e) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_not_enough_cols(parallel, read_csv): + """ + If a row does not have enough columns, the FastCsv reader should add empty + fields while the FastBasic reader should raise an error. + """ + text = """ +A,B,C +1,2,3 +4,5 +6,7,8 +""" + table = read_csv(text, parallel=parallel) + assert table['B'][1] is not ma.masked + assert table['C'][1] is ma.masked + + with pytest.raises(CParserError) as e: + table = FastBasic(delimiter=',').read(text) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_data_end(parallel, read_basic, read_rdb): + """ + The parameter data_end should specify where data reading ends. + """ + text = """ +A B C +1 2 3 +4 5 6 +7 8 9 +10 11 12 +""" + table = read_basic(text, data_end=3, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # data_end supports negative indexing + table = read_basic(text, data_end=-2, parallel=parallel) + assert_table_equal(table, expected) + + text = """ +A\tB\tC +N\tN\tS +1\t2\ta +3\t4\tb +5\t6\tc +""" + # make sure data_end works with RDB + table = read_rdb(text, data_end=-1, parallel=parallel) + expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # positive index + table = read_rdb(text, data_end=3, parallel=parallel) + expected = Table([[1], [2], ['a']], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # empty table if data_end is too small + table = read_rdb(text, data_end=1, parallel=parallel) + expected = Table([[], [], []], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_inf_nan(parallel, read_basic): + """ + Test that inf and nan-like values are correctly parsed on all platforms. 
+ + Regression test for https://github.com/astropy/astropy/pull/3525 + """ + + text = dedent("""\ + A + nan + +nan + -nan + inf + infinity + +inf + +infinity + -inf + -infinity + """) + + expected = Table({'A': [np.nan, np.nan, np.nan, + np.inf, np.inf, np.inf, np.inf, + -np.inf, -np.inf]}) + + table = read_basic(text, parallel=parallel) + assert table['A'].dtype.kind == 'f' + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fill_values(parallel, read_basic): + """ + Make sure that the parameter fill_values works as intended. If fill_values + is not specified, the default behavior should be to convert '' to 0. + """ + text = """ +A, B, C +, 2, nan +a, -999, -3.4 +nan, 5, -9999 +8, nan, 7.6e12 +""" + table = read_basic(text, delimiter=',', parallel=parallel) + # The empty value in row A should become a masked '0' + assert isinstance(table['A'], MaskedColumn) + assert table['A'][0] is ma.masked + # '0' rather than 0 because there is a string in the column + assert_equal(table['A'].data.data[0], '0') + assert table['A'][1] is not ma.masked + + table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel) + assert isinstance(table['B'], MaskedColumn) + assert table['A'][0] is not ma.masked # empty value unaffected + assert table['C'][2] is not ma.masked # -9999 is not an exact match + assert table['B'][1] is ma.masked + # Numeric because the rest of the column contains numeric data + assert_equal(table['B'].data.data[1], 0.0) + assert table['B'][0] is not ma.masked + + table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel) + # None of the columns should be masked + for name in 'ABC': + assert not isinstance(table[name], MaskedColumn) + + table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'), + ('nan', '999', 'A', 'C')], parallel=parallel) + assert np.isnan(table['B'][3]) # nan filling skips column B + assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan + assert table['A'][0] is ma.masked + assert table['A'][2] is ma.masked + assert_equal(table['A'].data.data[0], '0') + assert_equal(table['A'].data.data[2], '999') + assert table['C'][0] is ma.masked + assert_almost_equal(table['C'].data.data[0], 999.0) + assert_almost_equal(table['C'][1], -3.4) # column is still of type float + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fill_include_exclude_names(parallel, read_csv): + """ + fill_include_names and fill_exclude_names should filter missing/empty value handling + in the same way that include_names and exclude_names filter output columns. 
+ """ + text = """ +A, B, C +, 1, 2 +3, , 4 +5, 5, +""" + table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel) + assert table['A'][0] is ma.masked + assert table['B'][1] is ma.masked + assert table['C'][2] is not ma.masked # C not in fill_include_names + + table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel) + assert table['C'][2] is ma.masked + assert table['A'][0] is not ma.masked + assert table['B'][1] is not ma.masked # A and B excluded from fill handling + + table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel) + assert table['A'][0] is ma.masked + assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names + assert table['C'][2] is not ma.masked + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_many_rows(parallel, read_basic): + """ + Make sure memory reallocation works okay when the number of rows + is large (so that each column string is longer than INITIAL_COL_SIZE). + """ + text = 'A B C\n' + for i in range(500): # create 500 rows + text += ' '.join([str(i) for i in range(3)]) + text += '\n' + + table = read_basic(text, parallel=parallel) + expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_many_columns(parallel, read_basic): + """ + Make sure memory reallocation works okay when the number of columns + is large (so that each header string is longer than INITIAL_HEADER_SIZE). + """ + # create a string with 500 columns and two data rows + text = ' '.join([str(i) for i in range(500)]) + text += ('\n' + text + '\n' + text) + table = read_basic(text, parallel=parallel) + expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)]) + assert_table_equal(table, expected) + + +def test_fast_reader(): + """ + Make sure that ascii.read() works as expected by default and with + fast_reader specified. + """ + text = 'a b c\n1 2 3\n4 5 6' + with pytest.raises(ParameterError): # C reader can't handle regex comment + ascii.read(text, format='fast_basic', guess=False, comment='##') + + # Enable multiprocessing and the fast converter + try: + ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': True, 'use_fast_converter': True}) + except NotImplementedError: + # Might get this on Windows, try without parallel... + if os.name == 'nt': + ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': False, + 'use_fast_converter': True}) + else: + raise + + # Should raise an error if fast_reader has an invalid key + with pytest.raises(FastOptionsError): + ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True}) + + # Use the slow reader instead + ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False) + # Will try the slow reader afterwards by default + ascii.read(text, format='basic', guess=False, comment='##') + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_read_tab(parallel, read_tab): + """ + The fast reader for tab-separated values should not strip whitespace, unlike + the basic reader. 
+ """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t ' + table = read_tab(text, parallel=parallel) + assert_equal(table['1'][0], ' a') # preserve line whitespace + assert_equal(table['2'][0], ' b ') # preserve field whitespace + assert table['3'][0] is ma.masked # empty value should be masked + assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields + assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_default_data_start(parallel, read_basic): + """ + If data_start is not explicitly passed to read(), data processing should + beginning right after the header. + """ + text = 'ignore this line\na b c\n1 2 3\n4 5 6' + table = read_basic(text, header_start=1, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_commented_header(parallel, read_commented_header): + """ + The FastCommentedHeader reader should mimic the behavior of the + CommentedHeader by overriding the default header behavior of FastBasic. + """ + text = """ + # A B C + 1 2 3 + 4 5 6 +""" + t1 = read_commented_header(text, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(t1, expected) + + text = '# first commented line\n # second commented line\n\n' + text + t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel) + assert_table_equal(t2, expected) + t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed + assert_table_equal(t3, expected) + + text += '7 8 9' + t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel) + expected = Table([[7], [8], [9]], names=('A', 'B', 'C')) + assert_table_equal(t4, expected) + + with pytest.raises(ParameterError): + read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_rdb(parallel, read_rdb): + """ + Make sure the FastRdb reader works as expected. + """ + text = """ + +A\tB\tC +1n\tS\t4N +1\t 9\t4.3 +""" + table = read_rdb(text, parallel=parallel) + expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + assert_equal(table['A'].dtype.kind, 'i') + assert table['B'].dtype.kind in ('S', 'U') + assert_equal(table['C'].dtype.kind, 'f') + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data + read_rdb(text, parallel=parallel) + assert 'Column C failed to convert' in str(e) + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified + read_rdb(text, parallel=parallel) + assert 'mismatch between number of column names and column types' in str(e) + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C + read_rdb(text, parallel=parallel) + assert 'type definitions do not all match [num](N|S)' in str(e) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_data_start(parallel, read_basic): + """ + Make sure that data parsing begins at data_start (ignoring empty and + commented lines but not taking quoted values into account). 
+ """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = """ +A B C +1 2 3 +4 5 6 + +7 8 "9 + \t1" +# comment +10 11 12 +""" + table = read_basic(text, data_start=2, parallel=parallel) + expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + table = read_basic(text, data_start=3, parallel=parallel) + # ignore empty line + expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + with pytest.raises(CParserError) as e: + # tries to begin in the middle of quoted field + read_basic(text, data_start=4, parallel=parallel) + assert 'not enough columns found in line 1 of data' in str(e) + + table = read_basic(text, data_start=5, parallel=parallel) + # ignore commented line + expected = Table([[10], [11], [12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + text = """ +A B C +1 2 3 +4 5 6 + +7 8 9 +# comment +10 11 12 +""" + # make sure reading works as expected in parallel + table = read_basic(text, data_start=2, parallel=parallel) + expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_quoted_empty_values(parallel, read_basic): + """ + Quoted empty values spanning multiple lines should be treated correctly. + """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = 'a b c\n1 2 " \n "' + table = read_basic(text, parallel=parallel) + assert table['c'][0] is ma.masked # empty value masked by default + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_csv_comment_default(parallel, read_csv): + """ + Unless the comment parameter is specified, the CSV reader should + not treat any lines as comments. + """ + text = 'a,b,c\n#1,2,3\n4,5,6' + table = read_csv(text, parallel=parallel) + expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_whitespace_before_comment(parallel, read_tab): + """ + Readers that don't strip whitespace from data (Tab, RDB) + should still treat lines with leading whitespace and then + the comment char as comment lines. + """ + text = 'a\tb\tc\n # comment line\n1\t2\t3' + table = read_tab(text, parallel=parallel) + expected = Table([[1], [2], [3]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_strip_line_trailing_whitespace(parallel, read_basic): + """ + Readers that strip whitespace from lines should ignore + trailing whitespace after the last data value of each + row. + """ + text = 'a b c\n1 2 \n3 4 5' + with pytest.raises(CParserError) as e: + ascii.read(StringIO(text), format='fast_basic', guess=False) + assert 'not enough columns found in line 1' in str(e) + + text = 'a b c\n 1 2 3 \t \n 4 5 6 ' + table = read_basic(text, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_data(parallel, read_basic): + """ + As long as column names are supplied, the C reader + should return an empty table in the absence of data. 
+ """ + table = read_basic('a b c', parallel=parallel) + expected = Table([[], [], []], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_line_endings(parallel, read_basic, read_commented_header, read_rdb): + """ + Make sure the fast reader accepts CR and CR+LF + as newlines. + """ + text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n' + expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c')) + + for newline in ('\r\n', '\r'): + table = read_basic(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + + # Make sure the splitlines() method of FileString + # works with CR/CR+LF line endings + text = '#' + text + for newline in ('\r\n', '\r'): + table = read_commented_header(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + + expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True) + expected['a'][0] = np.ma.masked + expected['c'][0] = np.ma.masked + text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n' + for newline in ('\r\n', '\r'): + table = read_rdb(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + assert np.all(table == expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_store_comments(parallel, read_basic): + """ + Make sure that the output Table produced by the fast + reader stores any comment lines in its meta attribute. + """ + text = """ +# header comment +a b c +# comment 2 +# comment 3 +1 2 3 +4 5 6 +""" + table = read_basic(text, parallel=parallel, check_meta=True) + assert_equal(table.meta['comments'], + ['header comment', 'comment 2', 'comment 3']) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_empty_quotes(parallel, read_basic): + """ + Make sure the C reader doesn't segfault when the + input data contains empty quotes. [#3407] + """ + table = read_basic('a b\n1 ""\n2 ""', parallel=parallel) + expected = Table([[1, 2], [0, 0]], names=('a', 'b')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fast_tab_with_names(parallel, read_tab): + """ + Make sure the C reader doesn't segfault when the header for the + first column is missing [#3545] + """ + content = """# +\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot +-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" + head = ['A{0}'.format(i) for i in range(28)] + table = read_tab(content, data_start=1, + parallel=parallel, names=head) + + +@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'), + reason='Environment variable TEST_READ_HUGE_FILE must be ' + 'defined to run this test') +def test_read_big_table(tmpdir): + """Test reading of a huge file. + + This test generates a huge CSV file (~2.3Gb) before reading it (see + https://github.com/astropy/astropy/pull/5319). The test is run only if the + environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running + the test requires quite a lot of memory (~18Gb when reading the file) !! 
+ + """ + NB_ROWS = 250000 + NB_COLS = 500 + filename = str(tmpdir.join("big_table.csv")) + + print("Creating a {} rows table ({} columns).".format(NB_ROWS, NB_COLS)) + data = np.random.random(NB_ROWS) + t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)]) + data = None + + print("Saving the table to {}".format(filename)) + t.write(filename, format='ascii.csv', overwrite=True) + t = None + + print("Counting the number of lines in the csv, it should be {}" + " + 1 (header).".format(NB_ROWS)) + assert sum(1 for line in open(filename)) == NB_ROWS + 1 + + print("Reading the file with astropy.") + t = Table.read(filename, format='ascii.csv', fast_reader=True) + assert len(t) == NB_ROWS + + +@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'), + reason='Environment variable TEST_READ_HUGE_FILE must be ' + 'defined to run this test') +def test_read_big_table2(tmpdir): + """Test reading of a file with a huge column. + """ + # (2**32 // 2) : max value for int + # // 10 : we use a value for rows that have 10 chars (1e9) + # + 5 : add a few lines so the length cannot be stored by an int + NB_ROWS = (2**32 // 2) // 10 + 5 + filename = str(tmpdir.join("big_table.csv")) + + print("Creating a {} rows table.".format(NB_ROWS)) + data = np.full(2**32 // 2 // 10 + 5, int(1e9), dtype=np.int32) + t = Table(data=[data], names=['a'], copy=False) + + print("Saving the table to {}".format(filename)) + t.write(filename, format='ascii.csv', overwrite=True) + t = None + + print("Counting the number of lines in the csv, it should be {}" + " + 1 (header).".format(NB_ROWS)) + assert sum(1 for line in open(filename)) == NB_ROWS + 1 + + print("Reading the file with astropy.") + t = Table.read(filename, format='ascii.csv', fast_reader=True) + assert len(t) == NB_ROWS + + +# fast_reader configurations: False| 'use_fast_converter'=False|True +@pytest.mark.parametrize('reader', [0, 1, 2]) +# catch Windows environment since we cannot use _read() with custom fast_reader +@pytest.mark.parametrize("parallel", [False, True]) +def test_data_out_of_range(parallel, reader): + """ + Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|) + shall be returned as 0 and +-inf respectively by the C parser, just like + the Python parser. + Test fast converter only to nominal accuracy. + """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + # Python reader and strtod() are expected to return precise results + rtol = 1.e-30 + if reader > 1: + rtol = 1.e-15 + # passing fast_reader dict with parametrize does not work! 
+ if reader > 0: + fast_reader = {'parallel': parallel, 'use_fast_converter': reader > 1} + else: + fast_reader = False + if parallel: + if reader < 1: + pytest.skip("Multiprocessing only available in fast reader") + elif TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + + fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345'] + values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf]) + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + # test some additional corner cases + fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305', '0.4e-324', + '2500e-327', ' 0.0000000000000000000001024E+330'] + values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]) + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + # test corner cases again with non-standard exponent_style (auto-detection) + if reader < 2: + pytest.skip("Fortran exponent style only available in fast converter") + fast_reader.update({'exponent_style': 'A'}) + fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305', '0.2e-323', + '2500-327', ' 0.0000000000000000000001024Q+330'] + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + +# catch Windows environment since we cannot use _read() with custom fast_reader +@pytest.mark.parametrize("parallel", [True, False]) +def test_int_out_of_range(parallel): + """ + Integer numbers outside int range shall be returned as string columns + consistent with the standard (Python) parser (no 'upcasting' to float). 
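+
+    As an illustrative sketch (not part of the original test), a value just
+    beyond the native integer range should come back as a string column::
+
+        from astropy.io import ascii
+        t = ascii.read('a\n9999999999999999999999', format='basic',
+                       guess=False, fast_reader=True)
+        # t['a'] is expected to have a string dtype, not float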
+ """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + + imin = np.iinfo(np.int).min+1 + imax = np.iinfo(np.int).max-1 + huge = '{:d}'.format(imax+2) + + text = 'P M S\n {:d} {:d} {:s}'.format(imax, imin, huge) + expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S')) + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + + # check with leading zeroes to make sure strtol does not read them as octal + text = 'P M S\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge) + expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S')) + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + + # mixed columns should be returned as float, but if the out-of-range integer + # shows up first, it will produce a string column - with both readers + pytest.xfail("Integer fallback depends on order of rows") + text = 'A B\n 12.3 {0:d}9\n {0:d}9 45.6e7'.format(imax) + expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]], + names=('A', 'B')) + + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + table = ascii.read(text, format='basic', guess=False, fast_reader=False) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fortran_reader(parallel): + """ + Make sure that ascii.read() can read Fortran-style exponential notation + using the fast_reader. + """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + + text = 'A B C\n100.01{:s}+99 2.0 3\n 4.2{:s}-1 5.0{:s}-1 0.6{:s}4' + expected = Table([[1.0001e101, 0.42], [2, 0.5], [3.0, 6000]], + names=('A', 'B', 'C')) + + expstyles = {'e': 4*('E'), 'D': ('D', 'd', 'd', 'D'), 'Q': 2*('q', 'Q'), + 'fortran': ('D', 'E', 'Q', 'd')} + + # C strtod (not-fast converter) can't handle Fortran exp + with pytest.raises(FastOptionsError) as e: + ascii.read(text.format(*(4*('D'))), format='basic', guess=False, + fast_reader={'use_fast_converter': False, + 'parallel': parallel, 'exponent_style': 'D'}) + assert 'fast_reader: exponent_style requires use_fast_converter' in str(e) + + # enable multiprocessing and the fast converter + # iterate over all style-exponent combinations + for s, c in expstyles.items(): + table = ascii.read(text.format(*c), format='basic', guess=False, + fast_reader={'parallel': parallel, + 'exponent_style': s}) + assert_table_equal(table, expected) + + # mixes and triple-exponents without any character using autodetect option + text = 'A B C\n1.0001+101 2.0E0 3\n.42d0 0.5 6.+003' + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) + assert_table_equal(table, expected) + + # additional corner-case checks + text = 'A B C\n1.0001+101 2.0+000 3\n0.42+000 0.5 6000.-000' + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fortran_invalid_exp(parallel): + """ + Test Fortran-style exponential notation in the fast_reader with invalid + exponent-like patterns (no triple-digits) to make sure they are returned + as strings instead, as with the standard C parser. 
+ """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + if parallel and TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + + fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.', + '2', '4.56e-2.3', '8000', '4.2-122'] + values = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, + 2, '4.56e-2.3', 8000, 4.2e-122] + + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'A'}) + read_values = [col[0] for col in t.itercols()] + assert read_values == values diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37454285ac0a65d3fc11d86108bd44684716d12a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_c_reader.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.py new file mode 100644 index 0000000000000000000000000000000000000000..43c58f6b2abc7590199c7971418188ed1460aa21 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.py @@ -0,0 +1,155 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from ... import ascii +from .common import (assert_equal, assert_almost_equal, has_isnan, + setup_function, teardown_function) + + +def read_table1(readme, data): + reader = ascii.Cds(readme) + return reader.read(data) + + +def read_table2(readme, data): + reader = ascii.get_reader(Reader=ascii.Cds, readme=readme) + reader.outputter = ascii.TableOutputter() + return reader.read(data) + + +def read_table3(readme, data): + return ascii.read(data, readme=readme) + + +def test_description(): + readme = 't/cds/description/ReadMe' + data = 't/cds/description/table.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 2) + assert_equal(table['Cluster'].description, 'Cluster name') + assert_equal(table['Star'].description, '') + assert_equal(table['Wave'].description, 'wave? 
Wavelength in Angstroms') + assert_equal(table['El'].description, 'a') + assert_equal(table['ion'].description, '- Ionization stage (1 for neutral element)') + assert_equal(table['EW'].description, 'Equivalent width (in mA)') + assert_equal(table['Q'].description, 'DAOSPEC quality parameter Q(large values are bad)') + + +def test_multi_header(): + readme = 't/cds/multi/ReadMe' + data = 't/cds/multi/lhs2065.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 18) + assert_almost_equal(table['Lambda'][-1], 6479.32) + assert_equal(table['Fnu'][-1], '0.285937') + data = 't/cds/multi/lp944-20.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 18) + assert_almost_equal(table['Lambda'][0], 6476.09) + assert_equal(table['Fnu'][-1], '0.489005') + + +def test_glob_header(): + readme = 't/cds/glob/ReadMe' + data = 't/cds/glob/lmxbrefs.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 291) + assert_equal(table['Name'][-1], 'J1914+0953') + assert_equal(table['BibCode'][-2], '2005A&A...432..235R') + + +def test_header_from_readme(): + r = ascii.Cds("t/vizier/ReadMe") + table = r.read("t/vizier/table1.dat") + assert len(r.data.data_lines) == 15 + assert len(table) == 15 + assert len(table.keys()) == 18 + Bmag = [14.79, + 15.00, + 14.80, + 12.38, + 12.36, + 12.24, + 13.75, + 13.65, + 13.41, + 11.59, + 11.68, + 11.53, + 13.92, + 14.03, + 14.18] + for i, val in enumerate(table.field('Bmag')): + assert val == Bmag[i] + + table = r.read("t/vizier/table5.dat") + assert len(r.data.data_lines) == 49 + assert len(table) == 49 + assert len(table.keys()) == 10 + Q = [0.289, + 0.325, + 0.510, + 0.577, + 0.539, + 0.390, + 0.957, + 0.736, + 1.435, + 1.117, + 1.473, + 0.808, + 1.416, + 2.209, + 0.617, + 1.046, + 1.604, + 1.419, + 1.431, + 1.183, + 1.210, + 1.005, + 0.706, + 0.665, + 0.340, + 0.323, + 0.391, + 0.280, + 0.343, + 0.369, + 0.495, + 0.828, + 1.113, + 0.499, + 1.038, + 0.260, + 0.863, + 1.638, + 0.479, + 0.232, + 0.627, + 0.671, + 0.371, + 0.851, + 0.607, + -9.999, + 1.958, + 1.416, + 0.949] + if has_isnan: + from .common import isnan + for i, val in enumerate(table.field('Q')): + if isnan(val): + # text value for a missing value in that table + assert Q[i] == -9.999 + else: + assert val == Q[i] + + +if __name__ == "__main__": # run from main directory; not from test/ + test_header_from_readme() + test_multi_header() + test_glob_header() + test_description() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d0af0816d5fa501e0a8bef6e6cedd28f16d11c5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_cds_header_from_readme.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..6d2421321031c0dd0f0993df335445a18e6377ff --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.py @@ -0,0 +1,55 @@ +# 
Licensed under a 3-clause BSD style license - see LICENSE.rst +import os +import sys + +import pytest +import numpy as np + +from .. import read + +ROOT = os.path.abspath(os.path.dirname(__file__)) + + +try: + import bz2 # pylint: disable=W0611 +except ImportError: + HAS_BZ2 = False +else: + HAS_BZ2 = True + +try: + if sys.version_info >= (3, 3, 0): + import lzma + else: + from backports import lzma # pylint: disable=W0611 +except ImportError: + HAS_XZ = False +else: + HAS_XZ = True + + +@pytest.mark.parametrize('filename', ['t/daophot.dat.gz', 't/latex1.tex.gz', + 't/short.rdb.gz']) +def test_gzip(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.gz', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) + + +@pytest.mark.xfail('not HAS_BZ2') +@pytest.mark.parametrize('filename', ['t/short.rdb.bz2', 't/ipac.dat.bz2']) +def test_bzip2(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.bz2', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) + + +@pytest.mark.xfail('not HAS_XZ') +@pytest.mark.parametrize('filename', ['t/short.rdb.xz', 't/ipac.dat.xz']) +def test_xz(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.xz', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b41080bfb30e8504b1dc18885b352f797f1eabff Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_compressed.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.py new file mode 100644 index 0000000000000000000000000000000000000000..bbc56dea89f2a33a04b32c59c34c4358269fb38b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.py @@ -0,0 +1,140 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +import os + +import pytest + +from ....table import Table, Column + +ROOT = os.path.abspath(os.path.dirname(__file__)) + +files = ['t/cds.dat', 't/ipac.dat', 't/daophot.dat', 't/latex1.tex', + 't/simple_csv.csv'] + +# Check to see if the BeautifulSoup dependency is present. 
+ +try: + from bs4 import BeautifulSoup # pylint: disable=W0611 + HAS_BEAUTIFUL_SOUP = True +except ImportError: + HAS_BEAUTIFUL_SOUP = False + +if HAS_BEAUTIFUL_SOUP: + files.append('t/html.html') + + +@pytest.mark.parametrize('filename', files) +def test_read_generic(filename): + Table.read(os.path.join(ROOT, filename), format='ascii') + + +def test_write_generic(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + t.write(str(tmpdir.join("test")), format='ascii') + + +def test_read_ipac(): + Table.read(os.path.join(ROOT, 't/ipac.dat'), format='ipac') + + +def test_read_cds(): + Table.read(os.path.join(ROOT, 't/cds.dat'), format='cds') + + +def test_read_dapphot(): + Table.read(os.path.join(ROOT, 't/daophot.dat'), format='daophot') + + +def test_read_latex(): + Table.read(os.path.join(ROOT, 't/latex1.tex'), format='latex') + + +def test_read_latex_noformat(): + Table.read(os.path.join(ROOT, 't/latex1.tex')) + + +def test_write_latex(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.tex")) + t.write(path, format='latex') + + +def test_write_latex_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.tex")) + t.write(path) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_read_html(): + Table.read(os.path.join(ROOT, 't/html.html'), format='html') + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_read_html_noformat(): + Table.read(os.path.join(ROOT, 't/html.html')) + + +def test_write_html(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.html")) + t.write(path, format='html') + + +def test_write_html_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.html")) + t.write(path) + + +def test_read_rdb(): + Table.read(os.path.join(ROOT, 't/short.rdb'), format='rdb') + + +def test_read_rdb_noformat(): + Table.read(os.path.join(ROOT, 't/short.rdb')) + + +def test_write_rdb(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.rdb")) + t.write(path, format='rdb') + + +def test_write_rdb_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.rdb")) + t.write(path) + + +def test_read_csv(): + '''If properly registered, filename should be sufficient to specify format + + #3189 + ''' + Table.read(os.path.join(ROOT, 't/simple_csv.csv')) + + +def test_write_csv(tmpdir): + '''If properly registered, filename should be sufficient to specify format + + #3189 + ''' + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.csv")) + t.write(path) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f287faca8f2344b7b0271d7e5d14a91fcdeb88b0 Binary files /dev/null 
and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_connect.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.py new file mode 100644 index 0000000000000000000000000000000000000000..5869e8b4163f657acf38b6e5f5d1a8ae14764d73 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.py @@ -0,0 +1,418 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module tests some of the methods related to the ``ECSV`` +reader/writer. + +Requires `pyyaml `_ to be installed. +""" +import os +import copy +import sys + +import pytest +import numpy as np + +from ....table import Table, Column, QTable, NdarrayMixin +from ....table.table_helpers import simple_table +from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation +from ....time import Time, TimeDelta +from ....tests.helper import quantity_allclose +from ....units.quantity import QuantityInfo + +from ....extern.six.moves import StringIO +from ..ecsv import DELIMITERS +from ... import ascii +from .... import units as u + +try: + import yaml # pylint: disable=W0611 + HAS_YAML = True +except ImportError: + HAS_YAML = False + +DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', + 'uint64', 'float16', 'float32', 'float64', 'float128', + 'str'] +if os.name == 'nt' or sys.maxsize <= 2**32: + DTYPES.remove('float128') + +T_DTYPES = Table() + +for dtype in DTYPES: + if dtype == 'bool': + data = np.array([False, True, False]) + elif dtype == 'str': + data = np.array(['ab 0', 'ab, 1', 'ab2']) + else: + data = np.arange(3, dtype=dtype) + c = Column(data, unit='m / s', description='descr_' + dtype, + meta={'meta ' + dtype: 1}) + T_DTYPES[dtype] = c + +T_DTYPES.meta['comments'] = ['comment1', 'comment2'] + +# Corresponds to simple_table() +SIMPLE_LINES = ['# %ECSV 0.9', + '# ---', + '# datatype:', + '# - {name: a, datatype: int64}', + '# - {name: b, datatype: float64}', + '# - {name: c, datatype: string}', + '# schema: astropy-2.0', + 'a b c', + '1 1.0 c', + '2 2.0 d', + '3 3.0 e'] + + +@pytest.mark.skipif('not HAS_YAML') +def test_write_simple(): + """ + Write a simple table with common types. This shows the compact version + of serialization with one line per column. 
+ """ + t = simple_table() + + out = StringIO() + t.write(out, format='ascii.ecsv') + assert out.getvalue().splitlines() == SIMPLE_LINES + + +@pytest.mark.skipif('not HAS_YAML') +def test_write_full(): + """ + Write a full-featured table with common types and explicitly checkout output + """ + t = T_DTYPES['bool', 'int64', 'float64', 'str'] + lines = ['# %ECSV 0.9', + '# ---', + '# datatype:', + '# - name: bool', + '# unit: m / s', + '# datatype: bool', + '# description: descr_bool', + '# meta: {meta bool: 1}', + '# - name: int64', + '# unit: m / s', + '# datatype: int64', + '# description: descr_int64', + '# meta: {meta int64: 1}', + '# - name: float64', + '# unit: m / s', + '# datatype: float64', + '# description: descr_float64', + '# meta: {meta float64: 1}', + '# - name: str', + '# unit: m / s', + '# datatype: string', + '# description: descr_str', + '# meta: {meta str: 1}', + '# meta: !!omap', + '# - comments: [comment1, comment2]', + '# schema: astropy-2.0', + 'bool int64 float64 str', + 'False 0 0.0 "ab 0"', + 'True 1 1.0 "ab, 1"', + 'False 2 2.0 ab2'] + + out = StringIO() + t.write(out, format='ascii.ecsv') + assert out.getvalue().splitlines() == lines + + +@pytest.mark.skipif('not HAS_YAML') +def test_write_read_roundtrip(): + """ + Write a full-featured table with all types and see that it round-trips on + readback. Use both space and comma delimiters. + """ + t = T_DTYPES + for delimiter in DELIMITERS: + out = StringIO() + t.write(out, format='ascii.ecsv', delimiter=delimiter) + + t2s = [Table.read(out.getvalue(), format='ascii.ecsv'), + Table.read(out.getvalue(), format='ascii'), + ascii.read(out.getvalue()), + ascii.read(out.getvalue(), format='ecsv', guess=False), + ascii.read(out.getvalue(), format='ecsv')] + for t2 in t2s: + assert t.meta == t2.meta + for name in t.colnames: + assert t[name].attrs_equal(t2[name]) + assert np.all(t[name] == t2[name]) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter(): + """ + Passing a delimiter other than space or comma gives an exception + """ + out = StringIO() + with pytest.raises(ValueError) as err: + T_DTYPES.write(out, format='ascii.ecsv', delimiter='|') + assert 'only space and comma are allowed' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_header_start(): + """ + Bad header without initial # %ECSV x.x + """ + lines = copy.copy(SIMPLE_LINES) + lines[0] = '# %ECV 0.9' + with pytest.raises(ascii.InconsistentTableError): + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter_input(): + """ + Illegal delimiter in input + """ + lines = copy.copy(SIMPLE_LINES) + lines.insert(2, '# delimiter: |') + with pytest.raises(ValueError) as err: + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + assert 'only space and comma are allowed' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_multidim_input(): + """ + Multi-dimensional column in input + """ + t = Table([np.arange(4).reshape(2, 2)], names=['a']) + out = StringIO() + with pytest.raises(ValueError) as err: + t.write(out, format='ascii.ecsv') + assert 'ECSV format does not support multidimensional column' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_round_trip_empty_table(): + """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" + t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c']) + out = StringIO() + t.write(out, format='ascii.ecsv') + t2 = Table.read(out.getvalue(), 
format='ascii.ecsv') + assert t.dtype == t2.dtype + assert len(t2) == 0 + + +@pytest.mark.skipif('not HAS_YAML') +def test_csv_ecsv_colnames_mismatch(): + """ + Test that mismatch in column names from normal CSV header vs. + ECSV YAML header raises the expected exception. + """ + lines = copy.copy(SIMPLE_LINES) + header_index = lines.index('a b c') + lines[header_index] = 'a b d' + with pytest.raises(ValueError) as err: + ascii.read(lines, format='ecsv') + assert "column names from ECSV header ['a', 'b', 'c']" in str(err) + + +@pytest.mark.skipif('not HAS_YAML') +def test_regression_5604(): + """ + See https://github.com/astropy/astropy/issues/5604 for more. + """ + t = Table() + t.meta = {"foo": 5*u.km, "foo2": u.s} + t["bar"] = [7]*u.km + + out = StringIO() + t.write(out, format="ascii.ecsv") + + assert '!astropy.units.Unit' in out.getvalue() + assert '!astropy.units.Quantity' in out.getvalue() + + +def assert_objects_equal(obj1, obj2, attrs, compare_class=True): + if compare_class: + assert obj1.__class__ is obj2.__class__ + + info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description'] + for attr in attrs + info_attrs: + a1 = obj1 + a2 = obj2 + for subattr in attr.split('.'): + try: + a1 = getattr(a1, subattr) + a2 = getattr(a2, subattr) + except AttributeError: + a1 = a1[subattr] + a2 = a2[subattr] + + if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f': + assert quantity_allclose(a1, a2, rtol=1e-10) + else: + assert np.all(a1 == a2) + + +el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) +sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', + obstime='J1990.5') +scc = sc.copy() +scc.representation = 'cartesian' +tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0]) +tm2 = Time(tm, format='iso') +tm3 = Time(tm, location=el) +tm3.info.serialize_method['ecsv'] = 'jd1_jd2' + + +mixin_cols = { + 'tm': tm, + 'tm2': tm2, + 'tm3': tm3, + 'dt': TimeDelta([1, 2] * u.day), + 'sc': sc, + 'scc': scc, + 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4', + obstime=['J1990.5'] * 2), + 'q': [1, 2] * u.m, + 'lat': Latitude([1, 2] * u.deg), + 'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg), + 'ang': Angle([1, 2] * u.deg), + 'el': el, + # 'nd': NdarrayMixin(el) # not supported yet +} + +time_attrs = ['value', 'shape', 'format', 'scale', 'precision', + 'in_subfmt', 'out_subfmt', 'location'] +compare_attrs = { + 'c1': ['data'], + 'c2': ['data'], + 'tm': time_attrs, + 'tm2': time_attrs, + 'tm3': time_attrs, + 'dt': ['shape', 'value', 'format', 'scale'], + 'sc': ['ra', 'dec', 'representation', 'frame.name'], + 'scc': ['x', 'y', 'z', 'representation', 'frame.name'], + 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'], + 'q': ['value', 'unit'], + 'lon': ['value', 'unit', 'wrap_angle'], + 'lat': ['value', 'unit'], + 'ang': ['value', 'unit'], + 'el': ['x', 'y', 'z', 'ellipsoid'], + 'nd': ['x', 'y', 'z'], +} + + +@pytest.mark.skipif('not HAS_YAML') +def test_ecsv_mixins_ascii_read_class(): + """Ensure that ascii.read(ecsv_file) returns the correct class + (QTable if any Quantity subclasses, Table otherwise). 
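+
+    A rough sketch of the behaviour being checked (illustrative only,
+    assuming pyyaml is available)::
+
+        from astropy.io import ascii
+        from astropy.table import QTable
+        from astropy.extern.six.moves import StringIO
+        import astropy.units as u
+
+        out = StringIO()
+        QTable({'q': [1, 2] * u.m}).write(out, format='ascii.ecsv')
+        t2 = ascii.read(out.getvalue(), format='ecsv')
+        # a Quantity column round-trips, so a QTable is expected back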
+ """ + # Make a table with every mixin type except Quantities + t = QTable({name: col for name, col in mixin_cols.items() + if not isinstance(col.info, QuantityInfo)}) + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = ascii.read(out.getvalue(), format='ecsv') + assert type(t2) is Table + + # Add a single quantity column + t['lon'] = mixin_cols['lon'] + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = ascii.read(out.getvalue(), format='ecsv') + assert type(t2) is QTable + + +@pytest.mark.skipif('not HAS_YAML') +def test_ecsv_mixins_qtable_to_table(): + """Test writing as QTable and reading as Table. Ensure correct classes + come out. + """ + names = sorted(mixin_cols) + + t = QTable([mixin_cols[name] for name in names], names=names) + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = Table.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + for name, col in t.columns.items(): + col2 = t2[name] + attrs = compare_attrs[name] + compare_class = True + + if isinstance(col.info, QuantityInfo): + # Downgrade Quantity to Column + unit + assert type(col2) is Column + attrs = ['unit'] # Other attrs are lost + compare_class = False + + assert_objects_equal(col, col2, attrs, compare_class) + + +@pytest.mark.skipif('not HAS_YAML') +@pytest.mark.parametrize('table_cls', (Table, QTable)) +def test_ecsv_mixins_as_one(table_cls): + """Test write/read all cols at once and validate intermediate column names""" + names = sorted(mixin_cols) + + serialized_names = ['ang', + 'dt', + 'el.x', 'el.y', 'el.z', + 'lat', + 'lon', + 'q', + 'sc.ra', 'sc.dec', + 'scc.x', 'scc.y', 'scc.z', + 'scd.ra', 'scd.dec', 'scd.distance', + 'scd.obstime', + 'tm', # serialize_method is formatted_value + 'tm2', # serialize_method is formatted_value + 'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2 + 'tm3.location.x', 'tm3.location.y', 'tm3.location.z'] + + t = table_cls([mixin_cols[name] for name in names], names=names) + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + # Read as a ascii.basic table (skip all the ECSV junk) + t3 = table_cls.read(out.getvalue(), format='ascii.basic') + assert t3.colnames == serialized_names + + +@pytest.mark.skipif('not HAS_YAML') +@pytest.mark.parametrize('name_col', list(mixin_cols.items())) +@pytest.mark.parametrize('table_cls', (Table, QTable)) +def test_ecsv_mixins_per_column(table_cls, name_col): + """Test write/read one col at a time and do detailed validation""" + name, col = name_col + + c = [1.0, 2.0] + t = table_cls([c, col, c], names=['c1', name, 'c2']) + t[name].info.description = 'description' + + if not t.has_mixin_columns: + pytest.skip('column is not a mixin (e.g. 
Quantity subclass in Table)') + + if isinstance(t[name], NdarrayMixin): + pytest.xfail('NdarrayMixin not supported') + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + for colname in t.colnames: + assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) + + # Special case to make sure Column type doesn't leak into Time class data + if name.startswith('tm'): + assert t2[name]._time.jd1.__class__ is np.ndarray + assert t2[name]._time.jd2.__class__ is np.ndarray diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aaf035d544aebbf4af00aea72efe6fdcd74a4fe Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_ecsv.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.py new file mode 100644 index 0000000000000000000000000000000000000000..c51324daba8fcf8566abd327deb07e44358354e0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.py @@ -0,0 +1,481 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import pytest + +from ....extern.six.moves import cStringIO as StringIO +from ... import ascii +from ..core import InconsistentTableError +from .common import (assert_equal, assert_almost_equal, + setup_function, teardown_function) + + +def assert_equal_splitlines(arg1, arg2): + assert_equal(arg1.splitlines(), arg2.splitlines()) + + +def test_read_normal(): + """Nice, typical fixed format table""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth) + dat = reader.read(table) + assert_equal(dat.colnames, ['Col1', 'Col2']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hello"') + assert_equal(dat[1][1], "'s worlds") + + +def test_read_normal_names(): + """Nice, typical fixed format table with col names provided""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + names=('name1', 'name2')) + dat = reader.read(table) + assert_equal(dat.colnames, ['name1', 'name2']) + assert_almost_equal(dat[1][0], 2.4) + + +def test_read_normal_names_include(): + """Nice, typical fixed format table with col names provided""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | Col3 | +| 1.2 | "hello" | 3 | +| 2.4 |'s worlds| 7 | +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + names=('name1', 'name2', 'name3'), + include_names=('name1', 'name3')) + dat = reader.read(table) + assert_equal(dat.colnames, ['name1', 'name3']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], 3) + + +def test_read_normal_exclude(): + """Nice, typical fixed format table with col name excluded""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + exclude_names=('Col1',)) + dat = reader.read(table) + 
assert_equal(dat.colnames, ['Col2']) + assert_equal(dat[1][0], "'s worlds") + + +def test_read_weird(): + """Weird input table with data values chopped by col extent """ + table = """ + Col1 | Col2 | + 1.2 "hello" + 2.4 sdf's worlds +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth) + dat = reader.read(table) + assert_equal(dat.colnames, ['Col1', 'Col2']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hel') + assert_equal(dat[1][1], "df's wo") + + +def test_read_double(): + """Table with double delimiters""" + table = """ +|| Name || Phone || TCP|| +| John | 555-1234 |192.168.1.10X| +| Mary | 555-2134 |192.168.1.12X| +| Bob | 555-4527 | 192.168.1.9X| +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_space_delimiter(): + """Table with space delimiter""" + table = """ + Name --Phone- ----TCP----- + John 555-1234 192.168.1.10 + Mary 555-2134 192.168.1.12 + Bob 555-4527 192.168.1.9 +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, + delimiter=' ') + assert_equal(tuple(dat.dtype.names), ('Name', '--Phone-', '----TCP-----')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_autocolumn(): + """Table with no header row and auto-column naming""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, + header_start=None, data_start=0) + assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_names(): + """Table with no header row and with col names provided. Second + and third rows also have hanging spaces after final |.""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, + header_start=None, data_start=0, + names=('Name', 'Phone', 'TCP')) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_autocolumn_NoHeader(): + """Table with no header row and auto-column naming""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader) + assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_names_NoHeader(): + """Table with no header row and with col names provided. 
Second + and third rows also have hanging spaces after final |.""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP')) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_col_starts(): + """Table with no delimiter with column start and end values specified.""" + table = """ +# 5 9 17 18 28 +# | | || | + John 555- 1234 192.168.1.10 + Mary 555- 2134 192.168.1.12 + Bob 555- 4527 192.168.1.9 +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP'), + col_starts=(0, 9, 18), + col_ends=(5, 17, 28), + ) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[0][1], "555- 1234") + assert_equal(dat[1][0], "Mary") + assert_equal(dat[1][2], "192.168.1.") + assert_equal(dat[2][2], "192.168.1") # col_end=28 cuts this column off + + +def test_read_detect_col_starts_or_ends(): + """Table with no delimiter with only column start or end values specified""" + table = """ +#1 9 19 <== Column start indexes +#| | | <== Column start positions +#<------><--------><-------------> <== Inferred column positions + John 555- 1234 192.168.1.10 + Mary 555- 2134 192.168.1.123 + Bob 555- 4527 192.168.1.9 + Bill 555-9875 192.255.255.255 +""" + for kwargs in ({'col_starts': (1, 9, 19)}, + {'col_ends': (8, 18, 33)}): + dat = ascii.read(table, + Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP'), + **kwargs) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[0][1], "555- 1234") + assert_equal(dat[1][0], "Mary") + assert_equal(dat[1][2], "192.168.1.123") + assert_equal(dat[3][2], "192.255.255.255") + + +table = """\ +| Col1 | Col2 | Col3 | Col4 | +| 1.2 | "hello" | 1 | a | +| 2.4 | 's worlds | 2 | 2 | +""" +dat = ascii.read(table, Reader=ascii.FixedWidth) + + +def test_write_normal(): + """Write a table as a normal fixed width table.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth) + assert_equal_splitlines(out.getvalue(), """\ +| Col1 | Col2 | Col3 | Col4 | +| 1.2 | "hello" | 1 | a | +| 2.4 | 's worlds | 2 | 2 | +""") + + +def test_write_fill_values(): + """Write a table as a normal fixed width table.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth, + fill_values=('a', 'N/A')) + assert_equal_splitlines(out.getvalue(), """\ +| Col1 | Col2 | Col3 | Col4 | +| 1.2 | "hello" | 1 | N/A | +| 2.4 | 's worlds | 2 | 2 | +""") + + +def test_write_no_pad(): + """Write a table as a fixed width table with no padding.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth, + delimiter_pad=None) + assert_equal_splitlines(out.getvalue(), """\ +|Col1| Col2|Col3|Col4| +| 1.2| "hello"| 1| a| +| 2.4|'s worlds| 2| 2| +""") + + +def test_write_no_bookend(): + """Write a table as a fixed width table with no bookend.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False) + assert_equal_splitlines(out.getvalue(), """\ +Col1 | Col2 | Col3 | Col4 + 1.2 | "hello" | 1 | a + 2.4 | 's worlds | 2 | 2 +""") + + +def test_write_no_delimiter(): + """Write a table as a fixed width table with no delimiter.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False, + delimiter=None) + assert_equal_splitlines(out.getvalue(), """\ +Col1 
Col2 Col3 Col4 + 1.2 "hello" 1 a + 2.4 's worlds 2 2 +""") + + +def test_write_noheader_normal(): + """Write a table as a normal fixed width table.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader) + assert_equal_splitlines(out.getvalue(), """\ +| 1.2 | "hello" | 1 | a | +| 2.4 | 's worlds | 2 | 2 | +""") + + +def test_write_noheader_no_pad(): + """Write a table as a fixed width table with no padding.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, + delimiter_pad=None) + assert_equal_splitlines(out.getvalue(), """\ +|1.2| "hello"|1|a| +|2.4|'s worlds|2|2| +""") + + +def test_write_noheader_no_bookend(): + """Write a table as a fixed width table with no bookend.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, + bookend=False) + assert_equal_splitlines(out.getvalue(), """\ +1.2 | "hello" | 1 | a +2.4 | 's worlds | 2 | 2 +""") + + +def test_write_noheader_no_delimiter(): + """Write a table as a fixed width table with no delimiter.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, bookend=False, + delimiter=None) + assert_equal_splitlines(out.getvalue(), """\ +1.2 "hello" 1 a +2.4 's worlds 2 2 +""") + + +def test_write_formats(): + """Write a table as a fixed width table with no delimiter.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidth, + formats={'Col1': '%-8.3f', 'Col2': '%-15s'}) + assert_equal_splitlines(out.getvalue(), """\ +| Col1 | Col2 | Col3 | Col4 | +| 1.200 | "hello" | 1 | a | +| 2.400 | 's worlds | 2 | 2 | +""") + + +def test_read_twoline_normal(): + """Typical fixed format table with two header lines (with some cruft + thrown in to test column positioning""" + table = """ + Col1 Col2 + ---- --------- + 1.2xx"hello" + 2.4 's worlds +""" + dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) + assert_equal(dat.dtype.names, ('Col1', 'Col2')) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hello"') + assert_equal(dat[1][1], "'s worlds") + + +def test_read_twoline_ReST(): + """Read restructured text table""" + table = """ +======= =========== + Col1 Col2 +======= =========== + 1.2 "hello" + 2.4 's worlds +======= =========== +""" + dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, + header_start=1, position_line=2, data_end=-1) + assert_equal(dat.dtype.names, ('Col1', 'Col2')) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hello"') + assert_equal(dat[1][1], "'s worlds") + + +def test_read_twoline_human(): + """Read text table designed for humans and test having position line + before the header line""" + table = """ ++------+----------+ +| Col1 | Col2 | ++------|----------+ +| 1.2 | "hello" | +| 2.4 | 's worlds| ++------+----------+ +""" + dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, + delimiter='+', + header_start=1, position_line=0, + data_start=3, data_end=-1) + assert_equal(dat.dtype.names, ('Col1', 'Col2')) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hello"') + assert_equal(dat[1][1], "'s worlds") + + +def test_read_twoline_fail(): + """Test failure if too many different character are on position line. + + The position line shall consist of only one character in addition to + the delimiter. 
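+    (For example, a line like ``|------|----------|`` would be accepted,
+    whereas the ``|------|==========|`` line used below mixes two characters
+    and should raise ``InconsistentTableError``.)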
+ """ + table = """ +| Col1 | Col2 | +|------|==========| +| 1.2 | "hello" | +| 2.4 | 's worlds| +""" + with pytest.raises(InconsistentTableError) as excinfo: + dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, + delimiter='|', guess=False) + assert 'Position line should only contain delimiters and one other character' in str(excinfo.value) + + +def test_read_twoline_wrong_marker(): + '''Test failure when position line uses characters prone to ambiguity + + Characters in position line must be part an allowed set because + normal letters or numbers will lead to ambiguous tables. + ''' + table = """ +| Col1 | Col2 | +|aaaaaa|aaaaaaaaaa| +| 1.2 | "hello" | +| 2.4 | 's worlds| +""" + with pytest.raises(InconsistentTableError) as excinfo: + dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, + delimiter='|', guess=False) + assert 'Characters in position line must be part' in str(excinfo.value) + + +def test_write_twoline_normal(): + """Write a table as a normal fixed width table.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine) + assert_equal_splitlines(out.getvalue(), """\ +Col1 Col2 Col3 Col4 +---- --------- ---- ---- + 1.2 "hello" 1 a + 2.4 's worlds 2 2 +""") + + +def test_write_twoline_no_pad(): + """Write a table as a fixed width table with no padding.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine, + delimiter_pad=' ', position_char='=') + assert_equal_splitlines(out.getvalue(), """\ +Col1 Col2 Col3 Col4 +==== ========= ==== ==== + 1.2 "hello" 1 a + 2.4 's worlds 2 2 +""") + + +def test_write_twoline_no_bookend(): + """Write a table as a fixed width table with no bookend.""" + out = StringIO() + ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine, + bookend=True, delimiter='|') + assert_equal_splitlines(out.getvalue(), """\ +|Col1| Col2|Col3|Col4| +|----|---------|----|----| +| 1.2| "hello"| 1| a| +| 2.4|'s worlds| 2| 2| +""") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf4ba4d30a4ee65bc32fad2e3bc2260939762809 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_fixedwidth.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_html.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_html.py new file mode 100644 index 0000000000000000000000000000000000000000..9b4294614d2a5e4f9dce71aa9a5a010b3ae8a8d8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/io/ascii/tests/test_html.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- + +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module tests some of the methods related to the ``HTML`` +reader/writer and aims to document its functionality. + +Requires `BeautifulSoup `_ +to be installed. +""" + +from .. import html +from .. import core +from ....table import Table + +import pytest +import numpy as np + +from .common import setup_function, teardown_function +from ... import ascii +from ....extern.six.moves import range, cStringIO as StringIO +from ....utils.xml.writer import HAS_BLEACH + +# Check to see if the BeautifulSoup dependency is present. 
+try:
+
+    from bs4 import BeautifulSoup, FeatureNotFound
+    HAS_BEAUTIFUL_SOUP = True
+except ImportError:
+    HAS_BEAUTIFUL_SOUP = False
+
+
+@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP')
+def test_soupstring():
+    """
+    Test to make sure the class SoupString behaves properly.
+    """
+
+    soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>')
+    soup_str = html.SoupString(soup)
+    assert isinstance(soup_str, str)
+    assert isinstance(soup_str, html.SoupString)
+    assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
+    assert soup_str.soup is soup
+
+
+def test_listwriter():
+    """
+    Test to make sure the class ListWriter behaves properly.
+    """
+
+    lst = []
+    writer = html.ListWriter(lst)
+
+    for i in range(5):
+        writer.write(i)
+    for ch in 'abcde':
+        writer.write(ch)
+
+    assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
+
+
+@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP')
+def test_identify_table():
+    """
+    Test to make sure that identify_table() returns whether the
+    given BeautifulSoup tag is the correct table to process.
+    """
+
+    # Should return False on non-<table> tags and None
+    soup = BeautifulSoup('<html><body></body></html>')
+    assert html.identify_table(soup, {}, 0) is False
+    assert html.identify_table(None, {}, 0) is False
+
+    soup = BeautifulSoup('
' + '
A
B
').table + assert html.identify_table(soup, {}, 2) is False + assert html.identify_table(soup, {}, 1) is True # Default index of 1 + + # Same tests, but with explicit parameter + assert html.identify_table(soup, {'table_id': 2}, 1) is False + assert html.identify_table(soup, {'table_id': 1}, 1) is True + + # Test identification by string ID + assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False + assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_missing_data(): + """ + Test reading a table with missing data + """ + # First with default where blank => '0' + table_in = ['', + '', + '', + '', + '
A
1
'] + dat = Table.read(table_in, format='ascii.html') + assert dat.masked is True + assert np.all(dat['A'].mask == [True, False]) + assert dat['A'].dtype.kind == 'i' + + # Now with a specific value '...' => missing + table_in = ['', + '', + '', + '', + '
A
...
1
'] + dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')]) + assert dat.masked is True + assert np.all(dat['A'].mask == [True, False]) + assert dat['A'].dtype.kind == 'i' + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_rename_cols(): + """ + Test reading a table and renaming cols + """ + table_in = ['', + '', + '', + '
A B
12
'] + + # Swap column names + dat = Table.read(table_in, format='ascii.html', names=['B', 'A']) + assert dat.colnames == ['B', 'A'] + assert len(dat) == 1 + + # Swap column names and only include A (the renamed version) + dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A']) + assert dat.colnames == ['A'] + assert len(dat) == 1 + assert np.all(dat['A'] == 2) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_no_names(): + """ + Test reading a table witn no column header + """ + table_in = ['', + '', + '', + '
1
2
'] + dat = Table.read(table_in, format='ascii.html') + assert dat.colnames == ['col1'] + assert len(dat) == 2 + + dat = Table.read(table_in, format='ascii.html', names=['a']) + assert dat.colnames == ['a'] + assert len(dat) == 2 + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_identify_table_fail(): + """ + Raise an exception with an informative error message if table_id + is not found. + """ + table_in = ['', + '
A
B
'] + + with pytest.raises(core.InconsistentTableError) as err: + Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'}, + guess=False) + assert str(err).endswith("ERROR: HTML table id 'bad_id' not found") + + with pytest.raises(core.InconsistentTableError) as err: + Table.read(table_in, format='ascii.html', htmldict={'table_id': 3}, + guess=False) + assert str(err).endswith("ERROR: HTML table number 3 not found") + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_backend_parsers(): + """ + Make sure the user can specify which back-end parser to use + and that an error is raised if the parser is invalid. + """ + for parser in ('lxml', 'xml', 'html.parser', 'html5lib'): + try: + table = Table.read('t/html2.html', format='ascii.html', + htmldict={'parser': parser}, guess=False) + except FeatureNotFound: + if parser == 'html.parser': + raise + # otherwise ignore if the dependency isn't present + + # reading should fail if the parser is invalid + with pytest.raises(FeatureNotFound): + Table.read('t/html2.html', format='ascii.html', + htmldict={'parser': 'foo'}, guess=False) + + +@pytest.mark.skipif('HAS_BEAUTIFUL_SOUP') +def test_htmlinputter_no_bs4(): + """ + This should return an OptionalTableImportError if BeautifulSoup + is not installed. + """ + + inputter = html.HTMLInputter() + with pytest.raises(core.OptionalTableImportError): + inputter.process_lines([]) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlinputter(): + """ + Test to ensure that HTMLInputter correctly converts input + into a list of SoupStrings representing table elements. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + + # In absence of table_id, defaults to the first table + expected = ['
Column 1Column 2Column 3
1a1.05
2b2.75
3c-1.25
Column AColumn BColumn C
4d10.5
5e27.5
6f-12.5
C1C2C3
7g105.0
8h275.0
9i-125.0
Col 1Col 2
').tr), + html.SoupString(BeautifulSoup('
Data 1Data 2
').tr)] + expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']] + assert list(splitter(lines)) == expected_data + + # Make sure the presence of a non-SoupString triggers a TypeError + lines.append('Data 3Data 4') + with pytest.raises(TypeError): + list(splitter(lines)) + + # Make sure that passing an empty list triggers an error + with pytest.raises(core.InconsistentTableError): + list(splitter([])) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlheader_start(): + """ + Test to ensure that the start_line method of HTMLHeader + returns the first line of header data. Uses t/html.html + for sample input. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + header = html.HTMLHeader() + + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'Column 1Column 2Column 3' + inputter.html['table_id'] = 'second' + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'Column AColumn BColumn C' + inputter.html['table_id'] = 3 + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'C1C2C3' + + # start_line should return None if no valid header is found + lines = [html.SoupString(BeautifulSoup('
Data
').tr), + html.SoupString(BeautifulSoup('

Text

').p)] + assert header.start_line(lines) is None + + # Should raise an error if a non-SoupString is present + lines.append('Header') + with pytest.raises(TypeError): + header.start_line(lines) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmldata(): + """ + Test to ensure that the start_line and end_lines methods + of HTMLData returns the first line of table data. Uses + t/html.html for sample input. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + data = html.HTMLData() + + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '1a1.05' + # end_line returns the index of the last data element + 1 + assert str(lines[data.end_line(lines) - 1]) == \ + '3c-1.25' + + inputter.html['table_id'] = 'second' + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '4d10.5' + assert str(lines[data.end_line(lines) - 1]) == \ + '6f-12.5' + + inputter.html['table_id'] = 3 + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '7g105.0' + assert str(lines[data.end_line(lines) - 1]) == \ + '9i-125.0' + + # start_line should raise an error if no table data exists + lines = [html.SoupString(BeautifulSoup('
').div), + html.SoupString(BeautifulSoup('
<p>Text</p>
').p)] + with pytest.raises(core.InconsistentTableError): + data.start_line(lines) + + # end_line should return None if no table data exists + assert data.end_line(lines) is None + + # Should raise an error if a non-SoupString is present + lines.append('Data') + with pytest.raises(TypeError): + data.start_line(lines) + with pytest.raises(TypeError): + data.end_line(lines) + + +def test_multicolumn_write(): + """ + Test to make sure that the HTML writer writes multidimensional + columns (those with iterable elements) using the colspan + attribute of . + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
C1C2C3
11.01.0aaa
22.02.0bbb
33.03.0ccc
+ + + """ + out = html.HTML().write(table)[0].strip() + assert out == expected.strip() + + +@pytest.mark.skipif('not HAS_BLEACH') +def test_multicolumn_write_escape(): + """ + Test to make sure that the HTML writer writes multidimensional + columns (those with iterable elements) using the colspan + attribute of . + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('', '', 'a'), ('', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
C1C2C3
11.01.0a
22.02.0bb
33.03.0ccc
+ + + """ + out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip() + assert out == expected.strip() + + +def test_write_no_multicols(): + """ + Test to make sure that the HTML writer will not use + multi-dimensional columns if the multicol parameter + is False. + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
C1C2C3
11.0 .. 1.0a .. a
22.0 .. 2.0b .. b
33.0 .. 3.0c .. c
+ + + """ + assert html.HTML({'multicol': False}).write(table)[0].strip() == \ + expected.strip() + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_multicolumn_read(): + """ + Test to make sure that the HTML reader inputs multidimensional + columns (those with iterable elements) using the colspan + attribute of . + + Ensure that any string element within a multidimensional column + casts all elements to string prior to type conversion operations. + """ + + table = Table.read('t/html2.html', format='ascii.html') + str_type = np.dtype((np.str, 21)) + expected = Table(np.array([(['1', '2.5000000000000000001'], 3), + (['1a', '1'], 3.5)], + dtype=[('A', str_type, (2,)), ('B', 'x'], ['y']], names=['a', 'b']) + + # One column contains raw HTML (string input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'}) + expected = """\ + + x + <em>y</em> + """ + assert expected in out.getvalue() + + # One column contains raw HTML (list input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']}) + assert expected in out.getvalue() + + # Two columns contains raw HTML (list input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']}) + expected = """\ + + x + y + """ + assert expected in out.getvalue() + + +@pytest.mark.skipif('not HAS_BLEACH') +def test_raw_html_write_clean(): + """ + Test that columns can contain raw HTML which is not escaped. + """ + import bleach + + t = Table([[''], ['

<p>y</p>
'], ['y']], names=['a', 'b', 'c']) + + # Confirm that +""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1, + sorting_script2=_SORTING_SCRIPT_PART_2) + +HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """ +$(document).ready(function() {{ + $('#{tid}').dataTable({{ + order: [], + pageLength: {display_length}, + lengthMenu: {display_length_menu}, + pagingType: "full_numbers", + columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] + }}); +}} ); +""" + + +# Default CSS for the JSViewer writer +DEFAULT_CSS = """\ +body {font-family: sans-serif;} +table.dataTable {width: auto !important; margin: 0 !important;} +.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} +""" + + +# Default CSS used when rendering a table in the IPython notebook +DEFAULT_CSS_NB = """\ +table.dataTable {clear: both; width: auto !important; margin: 0 !important;} +.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{ +display: inline-block; margin-right: 1em; } +.paginate_button { margin-right: 5px; } +""" + + +class JSViewer(object): + """Provides an interactive HTML export of a Table. + + This class provides an interface to the `DataTables + `_ library, which allow to visualize interactively + an HTML table. It is used by the `~astropy.table.Table.show_in_browser` + method. + + Parameters + ---------- + use_local_files : bool, optional + Use local files or a CDN for JavaScript libraries. Default False. + display_length : int, optional + Number or rows to show. Default to 50. + + """ + + def __init__(self, use_local_files=False, display_length=50): + self._use_local_files = use_local_files + self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1], + [10, 25, 50, 100, 500, 1000, "All"]] + self.display_length = display_length + for L in self.display_length_menu: + if display_length not in L: + L.insert(0, display_length) + + @property + def jquery_urls(self): + if self._use_local_files: + return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.1.1.min.js'), + 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')] + else: + return [conf.jquery_url, conf.datatables_url] + + @property + def css_urls(self): + if self._use_local_files: + return ['file://' + join(EXTERN_CSS_DIR, + 'jquery.dataTables.css')] + else: + return conf.css_urls + + def _jstable_file(self): + if self._use_local_files: + return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min') + else: + return conf.datatables_url[:-3] + + def ipynb(self, table_id, css=None, sort_columns='[]'): + html = ''.format(css if css is not None + else DEFAULT_CSS_NB) + html += IPYNB_JS_SCRIPT.format( + display_length=self.display_length, + display_length_menu=self.display_length_menu, + datatables_url=self._jstable_file(), + tid=table_id, sort_columns=sort_columns) + return html + + def html_js(self, table_id='table0', sort_columns='[]'): + return HTML_JS_SCRIPT.format( + display_length=self.display_length, + display_length_menu=self.display_length_menu, + tid=table_id, sort_columns=sort_columns).strip() + + +def write_table_jsviewer(table, filename, table_id=None, max_lines=5000, + table_class="display compact", jskwargs=None, + css=DEFAULT_CSS): + if table_id is None: + table_id = 'table{id}'.format(id=id(table)) + + jskwargs = jskwargs or {} + jsv = JSViewer(**jskwargs) + + sortable_columns = [i for i, col in enumerate(table.columns.values()) + if col.dtype.kind in 'iufc'] + htmldict = { + 'table_id': table_id, + 'table_class': table_class, + 'css': css, + 'cssfiles': jsv.css_urls, + 
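        # These htmldict entries are passed straight through to the ascii.html
        # writer, which emits 'cssfiles'/'jsfiles' as <link>/<script> references
        # and embeds 'js' as an inline script in the written page.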
'jsfiles': jsv.jquery_urls, + 'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns) + } + + if max_lines < len(table): + table = table[:max_lines] + table.write(filename, format='html', htmldict=htmldict) + + +io_registry.register_writer('jsviewer', Table, write_table_jsviewer) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/jsviewer.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/jsviewer.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c2a003baec3ef9ac4af03d838514cd3cf7d733 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/jsviewer.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.py new file mode 100644 index 0000000000000000000000000000000000000000..ea35e39187f86e1ad22774cbf97822fe8baf77c8 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.py @@ -0,0 +1,342 @@ +import textwrap +import copy +from collections import OrderedDict + +from ..extern import six + +__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table'] + + +class ColumnOrderList(list): + """ + List of tuples that sorts in a specific order that makes sense for + astropy table column attributes. + """ + + def sort(self, *args, **kwargs): + super(ColumnOrderList, self).sort() + + column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta'] + in_dict = dict(self) + out_list = [] + + for key in column_keys: + if key in in_dict: + out_list.append((key, in_dict[key])) + for key, val in self: + if key not in column_keys: + out_list.append((key, val)) + + # Clear list in-place + del self[:] + + self.extend(out_list) + + +class ColumnDict(dict): + """ + Specialized dict subclass to represent attributes of a Column + and return items() in a preferred order. This is only for use + in generating a YAML map representation that has a fixed order. + """ + + def items(self): + """ + Return items as a ColumnOrderList, which sorts in the preferred + way for column attributes. + """ + return ColumnOrderList(super(ColumnDict, self).items()) + + +def _construct_odict(load, node): + """ + Construct OrderedDict from !!omap in yaml safe load. + + Source: https://gist.github.com/weaver/317164 + License: Unspecified + + This is the same as SafeConstructor.construct_yaml_omap(), + except the data type is changed to OrderedDict() and setitem is + used instead of append in the loop + + Examples + -------- + :: + + >>> yaml.load(''' # doctest: +SKIP + ... !!omap + ... - foo: bar + ... - mumble: quux + ... - baz: gorp + ... 
''') + OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + + >>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP + OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + """ + import yaml + + omap = OrderedDict() + yield omap + if not isinstance(node, yaml.SequenceNode): + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a sequence, but found {}".format(node.id), node.start_mark) + + for subnode in node.value: + if not isinstance(subnode, yaml.MappingNode): + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found {}".format(subnode.id), + subnode.start_mark) + + if len(subnode.value) != 1: + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found {} items".format(len(subnode.value)), + subnode.start_mark) + + key_node, value_node = subnode.value[0] + key = load.construct_object(key_node) + value = load.construct_object(value_node) + omap[key] = value + + +def _repr_pairs(dump, tag, sequence, flow_style=None): + """ + This is the same code as BaseRepresenter.represent_sequence(), + but the value passed to dump.represent_data() in the loop is a + dictionary instead of a tuple. + + Source: https://gist.github.com/weaver/317164 + License: Unspecified + """ + import yaml + + value = [] + node = yaml.SequenceNode(tag, value, flow_style=flow_style) + if dump.alias_key is not None: + dump.represented_objects[dump.alias_key] = node + best_style = True + for (key, val) in sequence: + item = dump.represent_data({key: val}) + if not (isinstance(item, yaml.ScalarNode) and not item.style): + best_style = False + value.append(item) + if flow_style is None: + if dump.default_flow_style is not None: + node.flow_style = dump.default_flow_style + else: + node.flow_style = best_style + return node + + +def _repr_odict(dumper, data): + """ + Represent OrderedDict in yaml dump. + + Source: https://gist.github.com/weaver/317164 + License: Unspecified + + >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + >>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP + '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n' + >>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP + '!!omap [foo: bar, mumble: quux, baz: gorp]\\n' + """ + return _repr_pairs(dumper, u'tag:yaml.org,2002:omap', six.iteritems(data)) + + +def _repr_column_dict(dumper, data): + """ + Represent ColumnDict in yaml dump. + + This is the same as an ordinary mapping except that the keys + are written in a fixed order that makes sense for astropy table + columns. + """ + return dumper.represent_mapping(u'tag:yaml.org,2002:map', data) + + +def _get_col_attributes(col): + """ + Extract information from a column (apart from the values) that is required + to fully serialize the column. 
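    A rough illustrative sketch (not output captured from this module; assumes
    a plain `astropy.table.Column` and a 64-bit default integer dtype)::

        >>> from astropy.table import Column
        >>> attrs = _get_col_attributes(Column([1, 2], name='a', unit='m'))  # doctest: +SKIP
        >>> attrs['name'], attrs['datatype'], attrs['unit']  # doctest: +SKIP
        ('a', 'int64', 'm')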
+ """ + attrs = ColumnDict() + attrs['name'] = col.info.name + + type_name = col.info.dtype.type.__name__ + if not six.PY2 and type_name.startswith(('bytes', 'str')): + type_name = 'string' + if type_name.endswith('_'): + type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV + attrs['datatype'] = type_name + + # Set the output attributes + for attr, nontrivial, xform in (('unit', lambda x: x is not None, str), + ('format', lambda x: x is not None, None), + ('description', lambda x: x is not None, None), + ('meta', lambda x: x, None)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + attrs[attr] = xform(col_attr) if xform else col_attr + + return attrs + + +def get_yaml_from_table(table): + """ + Return lines with a YAML representation of header content from the ``table``. + + Parameters + ---------- + table : `~astropy.table.Table` object + Table for which header content is output + + Returns + ------- + lines : list + List of text lines with YAML header content + """ + + header = {'cols': list(six.itervalues(table.columns))} + if table.meta: + header['meta'] = table.meta + + return get_yaml_from_header(header) + + +def get_yaml_from_header(header): + """ + Return lines with a YAML representation of header content from a Table. + + The ``header`` dict must contain these keys: + + - 'cols' : list of table column objects (required) + - 'meta' : table 'meta' attribute (optional) + + Other keys included in ``header`` will be serialized in the output YAML + representation. + + Parameters + ---------- + header : dict + Table header content + + Returns + ------- + lines : list + List of text lines with YAML header content + """ + try: + import yaml + except ImportError: + raise ImportError('`import yaml` failed, PyYAML package is required for ECSV format') + + from ..io.misc.yaml import AstropyDumper + + class TableDumper(AstropyDumper): + """ + Custom Dumper that represents OrderedDict as an !!omap object. + """ + + def represent_mapping(self, tag, mapping, flow_style=None): + """ + This is a combination of the Python 2 and 3 versions of this method + in the PyYAML library to allow the required key ordering via the + ColumnOrderList object. The Python 3 version insists on turning the + items() mapping into a list object and sorting, which results in + alphabetical order for the column keys. 
+ """ + value = [] + node = yaml.MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + if hasattr(mapping, 'sort'): + mapping.sort() + else: + mapping = list(mapping) + try: + mapping = sorted(mapping) + except TypeError: + pass + + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + TableDumper.add_representer(OrderedDict, _repr_odict) + TableDumper.add_representer(ColumnDict, _repr_column_dict) + + header = copy.copy(header) # Don't overwrite original + header['datatype'] = [_get_col_attributes(col) for col in header['cols']] + del header['cols'] + + lines = yaml.dump(header, Dumper=TableDumper).splitlines() + return lines + + +class YamlParseError(Exception): + pass + + +def get_header_from_yaml(lines): + """ + Get a header dict from input ``lines`` which should be valid YAML in the + ECSV meta format. This input will typically be created by + get_yaml_from_header. The output is a dictionary which describes all the + table and column meta. + + The get_cols() method in the io/ascii/ecsv.py file should be used as a + guide to using the information when constructing a table using this + header dict information. + + Parameters + ---------- + lines : list + List of text lines with YAML header content + + Returns + ------- + header : dict + Dictionary describing table and column meta + """ + + try: + import yaml + except ImportError: + raise ImportError('`import yaml` failed, PyYAML package is required for ECSV format') + + from ..io.misc.yaml import AstropyLoader + + class TableLoader(AstropyLoader): + """ + Custom Loader that constructs OrderedDict from an !!omap object. + This does nothing but provide a namespace for adding the + custom odict constructor. + """ + + TableLoader.add_constructor(u'tag:yaml.org,2002:omap', _construct_odict) + # Now actually load the YAML data structure into `meta` + header_yaml = textwrap.dedent('\n'.join(lines)) + try: + header = yaml.load(header_yaml, Loader=TableLoader) + except Exception as err: + raise YamlParseError(str(err)) + + return header diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54121b11a79fb7bd76a0e3e11ffb6d84ad84e150 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/meta.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9310ed07f78c25af8f70d38f99b782bb0a81c13f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.py @@ -0,0 +1,203 @@ +""" +High-level operations for numpy structured arrays. 
+ +Some code and inspiration taken from numpy.lib.recfunctions.join_by(). +Redistribution license restrictions apply. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range + +from itertools import chain +import collections +from collections import OrderedDict, Counter + +import numpy as np +import numpy.ma as ma + +from . import _np_utils + +__all__ = ['TableMergeError'] + + +class TableMergeError(ValueError): + pass + + +def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}', + table_names=None): + """ + Find the column names mapping when merging the list of structured ndarrays + ``arrays``. It is assumed that col names in ``common_names`` are to be + merged into a single column while the rest will be uniquely represented + in the output. The args ``uniq_col_name`` and ``table_names`` specify + how to rename columns in case of conflicts. + + Returns a dict mapping each output column name to the input(s). This takes the form + {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names + will be present, while for the other non-key columns the value will be (col_name_0, + None, ..) or (None, col_name_1, ..) etc. + """ + + col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) + col_name_list = [] + + if table_names is None: + table_names = [six.text_type(ii + 1) for ii in range(len(arrays))] + + for idx, array in enumerate(arrays): + table_name = table_names[idx] + for name in array.dtype.names: + out_name = name + + if name in common_names: + # If name is in the list of common_names then insert into + # the column name list, but just once. + if name not in col_name_list: + col_name_list.append(name) + else: + # If name is not one of the common column outputs, and it collides + # with the names in one of the other arrays, then rename + others = list(arrays) + others.pop(idx) + if any(name in other.dtype.names for other in others): + out_name = uniq_col_name.format(table_name=table_name, col_name=name) + col_name_list.append(out_name) + + col_name_map[out_name][idx] = name + + # Check for duplicate output column names + col_name_count = Counter(col_name_list) + repeated_names = [name for name, count in six.iteritems(col_name_count) if count > 1] + if repeated_names: + raise TableMergeError('Merging column names resulted in duplicates: {0}. ' + 'Change uniq_col_name or table_names args to fix this.' + .format(repeated_names)) + + # Convert col_name_map to a regular dict with tuple (immutable) values + col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) + + return col_name_map + + +def get_descrs(arrays, col_name_map): + """ + Find the dtypes descrs resulting from merging the list of arrays' dtypes, + using the column name mapping ``col_name_map``. + + Return a list of descrs for the output. + """ + + out_descrs = [] + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + # List of names of the columns that contribute to this output column. 
+ names = [name for name in in_names if name is not None] + + # Output dtype is the superset of all dtypes in in_arrays + try: + dtype = common_dtype(in_cols) + except TableMergeError as tme: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(names[0], tme._incompat_types)) + + # Make sure all input shapes are the same + uniq_shapes = set(col.shape[1:] for col in in_cols) + if len(uniq_shapes) != 1: + raise TableMergeError('Key columns {0!r} have different shape'.format(name)) + shape = uniq_shapes.pop() + + out_descrs.append((fix_column_name(out_name), dtype, shape)) + + return out_descrs + + +def common_dtype(cols): + """ + Use numpy to find the common dtype for a list of structured ndarray columns. + + Only allow columns within the following fundamental numpy data types: + np.bool_, np.object_, np.number, np.character, np.void + """ + np_types = (np.bool_, np.object_, np.number, np.character, np.void) + uniq_types = set(tuple(issubclass(col.dtype.type, np_type) for np_type in np_types) + for col in cols) + if len(uniq_types) > 1: + # Embed into the exception the actual list of incompatible types. + incompat_types = [col.dtype.name for col in cols] + tme = TableMergeError('Columns have incompatible types {0}' + .format(incompat_types)) + tme._incompat_types = incompat_types + raise tme + + arrs = [np.empty(1, dtype=col.dtype) for col in cols] + + # For string-type arrays need to explicitly fill in non-zero + # values or the final arr_common = .. step is unpredictable. + for arr in arrs: + if arr.dtype.kind in ('S', 'U'): + arr[0] = '0' * arr.itemsize + + arr_common = np.array([arr[0] for arr in arrs]) + return arr_common.dtype.str + + +def _check_for_sequence_of_structured_arrays(arrays): + err = '`arrays` arg must be a sequence (e.g. list) of structured arrays' + if not isinstance(arrays, collections.Sequence): + raise TypeError(err) + for array in arrays: + # Must be structured array + if not isinstance(array, np.ndarray) or array.dtype.names is None: + raise TypeError(err) + if len(arrays) == 0: + raise ValueError('`arrays` arg must include at least one array') + + +def fix_column_name(val): + """ + Fixes column names so that they are compatible with Numpy on + Python 2. Raises a ValueError exception if the column name + contains Unicode characters, which can not reasonably be used as a + column name. + """ + if val is not None: + try: + val = str(val) + except UnicodeEncodeError: + if six.PY2: + raise ValueError( + "Column names must not contain Unicode characters " + "on Python 2") + raise + + return val + + +def recarray_fromrecords(rec_list): + """ + Partial replacement for `~numpy.core.records.fromrecords` which includes + a workaround for the bug with unicode arrays described at: + https://github.com/astropy/astropy/issues/3052 + + This should not serve as a full replacement for the original function; + this only does enough to fulfill the needs of the table module. 
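    A minimal illustrative sketch (hypothetical rows; the record array gets the
    default field names f0, f1, ... and platform-dependent dtypes)::

        >>> r = recarray_fromrecords([(1, 'a', 2.5), (2, 'b', 3.5)])  # doctest: +SKIP
        >>> r.f0, r.f2  # doctest: +SKIP
        (array([1, 2]), array([ 2.5,  3.5]))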
+ """ + + # Note: This is just copying what Numpy does for converting arbitrary rows + # to column arrays in the recarray module; it could be there is a better + # way + nfields = len(rec_list[0]) + obj = np.array(rec_list, dtype=object) + array_list = [np.array(obj[..., i].tolist()) for i in range(nfields)] + formats = [] + for obj in array_list: + formats.append(obj.dtype.str) + formats = ','.join(formats) + return np.rec.fromarrays(array_list, formats=formats) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..292325e8b822a626f882b3b079e4047488e788fd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/np_utils.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..6eaaa3afbefe0705a7edd9425fb3a7bed2cec3d9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.py @@ -0,0 +1,872 @@ +""" +High-level table operations: + +- join() +- hstack() +- vstack() +""" +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range + +from copy import deepcopy +import warnings +import collections +import itertools +from collections import OrderedDict, Counter + +import numpy as np +from numpy import ma + +from ..utils import metadata +from .column import Column + +from . import _np_utils +from .np_utils import fix_column_name, TableMergeError + +__all__ = ['join', 'hstack', 'vstack', 'unique'] + + +def _merge_table_meta(out, tables, metadata_conflicts='warn'): + out_meta = deepcopy(tables[0].meta) + for table in tables[1:]: + out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts) + out.meta.update(out_meta) + + +def _get_list_of_tables(tables): + """ + Check that tables is a Table or sequence of Tables. Returns the + corresponding list of Tables. + """ + from .table import Table, Row + + # Make sure we have a list of things + if not isinstance(tables, collections.Sequence): + tables = [tables] + + # Make sure each thing is a Table or Row + if any(not isinstance(x, (Table, Row)) for x in tables) or len(tables) == 0: + raise TypeError('`tables` arg must be a Table or sequence of Tables or Rows') + + # Convert any Rows to Tables + tables = [(x if isinstance(x, Table) else Table(x)) for x in tables] + + return tables + + +def _get_out_class(objs): + """ + From a list of input objects ``objs`` get merged output object class. + + This is just taken as the deepest subclass. This doesn't handle complicated + inheritance schemes. 
+ """ + out_class = objs[0].__class__ + for obj in objs[1:]: + if issubclass(obj.__class__, out_class): + out_class = obj.__class__ + + if any(not issubclass(out_class, obj.__class__) for obj in objs): + raise ValueError('unmergeable object classes {}' + .format([obj.__class__.__name__ for obj in objs])) + + return out_class + + +def join(left, right, keys=None, join_type='inner', + uniq_col_name='{col_name}_{table_name}', + table_names=['1', '2'], metadata_conflicts='warn'): + """ + Perform a join of the left table with the right table on specified keys. + + Parameters + ---------- + left : Table object or a value that will initialize a Table object + Left side table in the join + right : Table object or a value that will initialize a Table object + Right side table in the join + keys : str or list of str + Name(s) of column(s) used to match rows of left and right tables. + Default is to use all columns which are common to both tables. + join_type : str + Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. + The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2']. + metadata_conflicts : str + How to proceed with metadata conflicts. This should be one of: + * ``'silent'``: silently pick the last conflicting meta-data value + * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) + * ``'error'``: raise an exception. + + Returns + ------- + joined_table : `~astropy.table.Table` object + New table containing the result of the join operation. + """ + from .table import Table + + # Try converting inputs to Table as needed + if not isinstance(left, Table): + left = Table(left) + if not isinstance(right, Table): + right = Table(right) + + col_name_map = OrderedDict() + out = _join(left, right, keys, join_type, + uniq_col_name, table_names, col_name_map, metadata_conflicts) + + # Merge the column and table meta data. Table subclasses might override + # these methods for custom merge behavior. + _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) + + return out + + +def vstack(tables, join_type='outer', metadata_conflicts='warn'): + """ + Stack tables vertically (along rows) + + A ``join_type`` of 'exact' means that the tables must all have exactly + the same column names (though the order can vary). If ``join_type`` + is 'inner' then the intersection of common columns will be the output. + A value of 'outer' (default) means the output will have the union of + all columns, with table values being masked where no common values are + available. + + Parameters + ---------- + tables : Table or list of Table objects + Table(s) to stack along rows (vertically) with the current table + join_type : str + Join type ('inner' | 'exact' | 'outer'), default is 'outer' + metadata_conflicts : str + How to proceed with metadata conflicts. This should be one of: + * ``'silent'``: silently pick the last conflicting meta-data value + * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) + * ``'error'``: raise an exception. + + Returns + ------- + stacked_table : `~astropy.table.Table` object + New table containing the stacked data from the input tables. 
+ + Examples + -------- + To stack two tables along rows do:: + + >>> from astropy.table import vstack, Table + >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) + >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) + >>> print(t1) + a b + --- --- + 1 3 + 2 4 + >>> print(t2) + a b + --- --- + 5 7 + 6 8 + >>> print(vstack([t1, t2])) + a b + --- --- + 1 3 + 2 4 + 5 7 + 6 8 + """ + tables = _get_list_of_tables(tables) # validates input + if len(tables) == 1: + return tables[0] # no point in stacking a single table + col_name_map = OrderedDict() + + out = _vstack(tables, join_type, col_name_map, metadata_conflicts) + + # Merge table metadata + _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) + + return out + + +def hstack(tables, join_type='outer', + uniq_col_name='{col_name}_{table_name}', table_names=None, + metadata_conflicts='warn'): + """ + Stack tables along columns (horizontally) + + A ``join_type`` of 'exact' means that the tables must all + have exactly the same number of rows. If ``join_type`` is 'inner' then + the intersection of rows will be the output. A value of 'outer' (default) + means the output will have the union of all rows, with table values being + masked where no common values are available. + + Parameters + ---------- + tables : List of Table objects + Tables to stack along columns (horizontally) with the current table + join_type : str + Join type ('inner' | 'exact' | 'outer'), default is 'outer' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. + The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2', ..]. + metadata_conflicts : str + How to proceed with metadata conflicts. This should be one of: + * ``'silent'``: silently pick the last conflicting meta-data value + * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) + * ``'error'``: raise an exception. + + Returns + ------- + stacked_table : `~astropy.table.Table` object + New table containing the stacked data from the input tables. + + Examples + -------- + To stack two tables horizontally (along columns) do:: + + >>> from astropy.table import Table, hstack + >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) + >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) + >>> print(t1) + a b + --- --- + 1 3 + 2 4 + >>> print(t2) + c d + --- --- + 5 7 + 6 8 + >>> print(hstack([t1, t2])) + a b c d + --- --- --- --- + 1 3 5 7 + 2 4 6 8 + """ + tables = _get_list_of_tables(tables) # validates input + if len(tables) == 1: + return tables[0] # no point in stacking a single table + col_name_map = OrderedDict() + + out = _hstack(tables, join_type, uniq_col_name, table_names, + col_name_map) + + _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) + + return out + + +def unique(input_table, keys=None, silent=False, keep='first'): + """ + Returns the unique rows of a table. + + Parameters + ---------- + + input_table : `~astropy.table.Table` object or a value that + will initialize a `~astropy.table.Table` object + keys : str or list of str + Name(s) of column(s) used to create unique rows. + Default is to use all columns. + keep : one of 'first', 'last' or 'none' + Whether to keep the first or last row for each set of + duplicates. If 'none', all rows that are duplicate are + removed, leaving only rows that are already unique in + the input. 
+ Default is 'first'. + silent : boolean + If `True`, masked value column(s) are silently removed from + ``keys``. If `False`, an exception is raised when ``keys`` + contains masked value column(s). + Default is `False`. + + Returns + ------- + unique_table : `~astropy.table.Table` object + New table containing only the unique rows of ``input_table``. + + Examples + -------- + >>> from astropy.table import unique, Table + >>> import numpy as np + >>> table = Table(data=[[1,2,3,2,3,3], + ... [2,3,4,5,4,6], + ... [3,4,5,6,7,8]], + ... names=['col1', 'col2', 'col3'], + ... dtype=[np.int32, np.int32, np.int32]) + >>> table + + col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + 2 3 4 + 3 4 5 + 2 5 6 + 3 4 7 + 3 6 8 + >>> unique(table, keys='col1') +
<Table length=6> + col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + 2 3 4 + 3 4 5 + 2 5 6 + 3 4 7 + 3 6 8 + >>> unique(table, keys='col1') +
<Table length=3> + col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + 2 3 4 + 3 4 5 + >>> unique(table, keys=['col1'], keep='last') +
<Table length=3> + col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + 2 5 6 + 3 6 8 + >>> unique(table, keys=['col1', 'col2']) +
<Table length=5> + col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + 2 3 4 + 2 5 6 + 3 4 5 + 3 6 8 + >>> unique(table, keys=['col1', 'col2'], keep='none') +
+ col1 col2 col3 + int32 int32 int32 + ----- ----- ----- + 1 2 3 + + """ + + if keep not in ('first', 'last', 'none'): + raise ValueError("'keep' should be one of 'first', 'last', 'none'") + + if isinstance(keys, six.string_types): + keys = [keys] + if keys is None: + keys = input_table.colnames + else: + if len(set(keys)) != len(keys): + raise ValueError("duplicate key names") + + if input_table.masked: + nkeys = 0 + for key in keys[:]: + if np.any(input_table[key].mask): + if not silent: + raise ValueError( + "cannot use columns with masked values as keys; " + "remove column '{0}' from keys and rerun " + "unique()".format(key)) + del keys[keys.index(key)] + if len(keys) == 0: + raise ValueError("no column remained in ``keys``; " + "unique() cannot work with masked value " + "key columns") + + grouped_table = input_table.group_by(keys) + indices = grouped_table.groups.indices + if keep == 'first': + indices = indices[:-1] + elif keep == 'last': + indices = indices[1:] - 1 + else: + indices = indices[:-1][np.diff(indices) == 1] + + return grouped_table[indices] + + +def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}', + table_names=None): + """ + Find the column names mapping when merging the list of tables + ``arrays``. It is assumed that col names in ``common_names`` are to be + merged into a single column while the rest will be uniquely represented + in the output. The args ``uniq_col_name`` and ``table_names`` specify + how to rename columns in case of conflicts. + + Returns a dict mapping each output column name to the input(s). This takes the form + {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names + will be present, while for the other non-key columns the value will be (col_name_0, + None, ..) or (None, col_name_1, ..) etc. + """ + + col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) + col_name_list = [] + + if table_names is None: + table_names = [six.text_type(ii + 1) for ii in range(len(arrays))] + + for idx, array in enumerate(arrays): + table_name = table_names[idx] + for name in array.colnames: + out_name = name + + if name in common_names: + # If name is in the list of common_names then insert into + # the column name list, but just once. + if name not in col_name_list: + col_name_list.append(name) + else: + # If name is not one of the common column outputs, and it collides + # with the names in one of the other arrays, then rename + others = list(arrays) + others.pop(idx) + if any(name in other.colnames for other in others): + out_name = uniq_col_name.format(table_name=table_name, col_name=name) + col_name_list.append(out_name) + + col_name_map[out_name][idx] = name + + # Check for duplicate output column names + col_name_count = Counter(col_name_list) + repeated_names = [name for name, count in six.iteritems(col_name_count) if count > 1] + if repeated_names: + raise TableMergeError('Merging column names resulted in duplicates: {0}. ' + 'Change uniq_col_name or table_names args to fix this.' + .format(repeated_names)) + + # Convert col_name_map to a regular dict with tuple (immutable) values + col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) + + return col_name_map + + +def get_descrs(arrays, col_name_map): + """ + Find the dtypes descrs resulting from merging the list of arrays' dtypes, + using the column name mapping ``col_name_map``. + + Return a list of descrs for the output. 
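    An illustrative sketch (``t1``/``t2`` are hypothetical tables that share a
    key column 'key' and both carry a clashing column 'x'; the exact dtype
    strings are whatever ``common_dtype`` returns on the platform)::

        >>> cmap = get_col_name_map([t1, t2], ['key'])  # doctest: +SKIP
        >>> get_descrs([t1, t2], cmap)  # doctest: +SKIP
        [('key', 'int64', ()), ('x_1', 'float64', ()), ('x_2', 'float64', ())]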
+ """ + + out_descrs = [] + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + # List of names of the columns that contribute to this output column. + names = [name for name in in_names if name is not None] + + # Output dtype is the superset of all dtypes in in_arrays + try: + dtype = common_dtype(in_cols) + except TableMergeError as tme: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(names[0], tme._incompat_types)) + + # Make sure all input shapes are the same + uniq_shapes = set(col.shape[1:] for col in in_cols) + if len(uniq_shapes) != 1: + raise TableMergeError('Key columns {0!r} have different shape'.format(names)) + shape = uniq_shapes.pop() + + out_descrs.append((fix_column_name(out_name), dtype, shape)) + + return out_descrs + + +def common_dtype(cols): + """ + Use numpy to find the common dtype for a list of columns. + + Only allow columns within the following fundamental numpy data types: + np.bool_, np.object_, np.number, np.character, np.void + """ + try: + return metadata.common_dtype(cols) + except metadata.MergeConflictError as err: + tme = TableMergeError('Columns have incompatible types {0}' + .format(err._incompat_types)) + tme._incompat_types = err._incompat_types + raise tme + + +def _join(left, right, keys=None, join_type='inner', + uniq_col_name='{col_name}_{table_name}', + table_names=['1', '2'], + col_name_map=None, metadata_conflicts='warn'): + """ + Perform a join of the left and right Tables on specified keys. + + Parameters + ---------- + left : Table + Left side table in the join + right : Table + Right side table in the join + keys : str or list of str + Name(s) of column(s) used to match rows of left and right tables. + Default is to use all columns which are common to both tables. + join_type : str + Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. + The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2']. + col_name_map : empty dict or None + If passed as a dict then it will be updated in-place with the + mapping of output to input column names. + + Returns + ------- + joined_table : `~astropy.table.Table` object + New table containing the result of the join operation. + """ + # Store user-provided col_name_map until the end + _col_name_map = col_name_map + + if join_type not in ('inner', 'outer', 'left', 'right'): + raise ValueError("The 'join_type' argument should be in 'inner', " + "'outer', 'left' or 'right' (got '{0}' instead)". 
+ format(join_type)) + + # If we have a single key, put it in a tuple + if keys is None: + keys = tuple(name for name in left.colnames if name in right.colnames) + if len(keys) == 0: + raise TableMergeError('No keys in common between left and right tables') + elif isinstance(keys, six.string_types): + keys = (keys,) + + # Check the key columns + for arr, arr_label in ((left, 'Left'), (right, 'Right')): + for name in keys: + if name not in arr.colnames: + raise TableMergeError('{0} table does not have key column {1!r}' + .format(arr_label, name)) + if hasattr(arr[name], 'mask') and np.any(arr[name].mask): + raise TableMergeError('{0} key column {1!r} has missing values' + .format(arr_label, name)) + if not isinstance(arr[name], np.ndarray): + raise ValueError("non-ndarray column '{}' not allowed as a key column" + .format(name)) + + len_left, len_right = len(left), len(right) + + if len_left == 0 or len_right == 0: + raise ValueError('input tables for join must both have at least one row') + + # Joined array dtype as a list of descr (name, type_str, shape) tuples + col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names) + out_descrs = get_descrs([left, right], col_name_map) + + # Make an array with just the key columns. This uses a temporary + # structured array for efficiency. + out_keys_dtype = [descr for descr in out_descrs if descr[0] in keys] + out_keys = np.empty(len_left + len_right, dtype=out_keys_dtype) + for key in keys: + out_keys[key][:len_left] = left[key] + out_keys[key][len_left:] = right[key] + idx_sort = out_keys.argsort(order=keys) + out_keys = out_keys[idx_sort] + + # Get all keys + diffs = np.concatenate(([True], out_keys[1:] != out_keys[:-1], [True])) + idxs = np.flatnonzero(diffs) + + # Main inner loop in Cython to compute the cartesion product + # indices for the given join type + int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3}[join_type] + masked, n_out, left_out, left_mask, right_out, right_mask = \ + _np_utils.join_inner(idxs, idx_sort, len_left, int_join_type) + + # If either of the inputs are masked then the output is masked + if left.masked or right.masked: + masked = True + masked = bool(masked) + + out = _get_out_class([left, right])(masked=masked) + + for out_name, dtype, shape in out_descrs: + + left_name, right_name = col_name_map[out_name] + if left_name and right_name: # this is a key which comes from left and right + cols = [left[left_name], right[right_name]] + + col_cls = _get_out_class(cols) + if not hasattr(col_cls.info, 'new_like'): + raise NotImplementedError('join unavailable for mixin column type(s): {}' + .format(col_cls.__name__)) + + out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name) + + if issubclass(col_cls, Column): + out[out_name][:] = np.where(right_mask, + left[left_name].take(left_out), + right[right_name].take(right_out)) + else: + # np.where does not work for mixin columns (e.g. Quantity) so + # use a slower workaround. 
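                # The two boolean masks partition the output rows by source
                # table; filling them with two masked assignments goes through
                # the mixin column's own __setitem__ rather than forcing the
                # values through a plain ndarray as np.where would.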
+ left_mask = ~right_mask + if np.any(left_mask): + out[out_name][left_mask] = left[left_name].take(left_out) + if np.any(right_mask): + out[out_name][right_mask] = right[right_name].take(right_out) + continue + elif left_name: # out_name came from the left table + name, array, array_out, array_mask = left_name, left, left_out, left_mask + elif right_name: + name, array, array_out, array_mask = right_name, right, right_out, right_mask + else: + raise TableMergeError('Unexpected column names (maybe one is ""?)') + + # Finally add the joined column to the output table. + out[out_name] = array[name][array_out] + + # If the output table is masked then set the output column masking + # accordingly. Check for columns that don't support a mask attribute. + if masked: + # array_mask is 1-d corresponding to length of output column. We need + # make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..). + # Mixin columns might not have ndim attribute so use len(col.shape). + array_mask.shape = (out[out_name].shape[0],) + (1,) * (len(out[out_name].shape) - 1) + + if array.masked: + array_mask = array_mask | array[name].mask[array_out] + try: + out[out_name].mask[:] = array_mask + except ValueError: + raise NotImplementedError( + "join requires masking column '{}' but column" + " type {} does not support masking" + .format(out_name, out[out_name].__class__.__name__)) + + # If col_name_map supplied as a dict input, then update. + if isinstance(_col_name_map, collections.Mapping): + _col_name_map.update(col_name_map) + + return out + + +def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'): + """ + Stack Tables vertically (by rows) + + A ``join_type`` of 'exact' (default) means that the arrays must all + have exactly the same column names (though the order can vary). If + ``join_type`` is 'inner' then the intersection of common columns will + be the output. A value of 'outer' means the output will have the union of + all columns, with array values being masked where no common values are + available. + + Parameters + ---------- + arrays : list of Tables + Tables to stack by rows (vertically) + join_type : str + Join type ('inner' | 'exact' | 'outer'), default is 'outer' + col_name_map : empty dict or None + If passed as a dict then it will be updated in-place with the + mapping of output to input column names. + + Returns + ------- + stacked_table : `~astropy.table.Table` object + New table containing the stacked data from the input tables. 
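    An illustrative sketch of the masking behaviour described above, via the
    public ``vstack`` (hypothetical tables; printed layout abbreviated)::

        >>> from astropy.table import Table, vstack
        >>> t1 = Table({'a': [1, 2], 'b': [3, 4]})
        >>> t2 = Table({'a': [5, 6]})
        >>> print(vstack([t1, t2]))  # doctest: +SKIP
         a   b
        --- ---
          1   3
          2   4
          5  --
          6  --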
+ """ + # Store user-provided col_name_map until the end + _col_name_map = col_name_map + + # Input validation + if join_type not in ('inner', 'exact', 'outer'): + raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'") + + # Trivial case of one input array + if len(arrays) == 1: + return arrays[0] + + # Start by assuming an outer match where all names go to output + names = set(itertools.chain(*[arr.colnames for arr in arrays])) + col_name_map = get_col_name_map(arrays, names) + + # If require_match is True then the output must have exactly the same + # number of columns as each input array + if join_type == 'exact': + for names in six.itervalues(col_name_map): + if any(x is None for x in names): + raise TableMergeError('Inconsistent columns in input arrays ' + "(use 'inner' or 'outer' join_type to " + "allow non-matching columns)") + join_type = 'outer' + + # For an inner join, keep only columns where all input arrays have that column + if join_type == 'inner': + col_name_map = OrderedDict((name, in_names) for name, in_names in six.iteritems(col_name_map) + if all(x is not None for x in in_names)) + if len(col_name_map) == 0: + raise TableMergeError('Input arrays have no columns in common') + + # If there are any output columns where one or more input arrays are missing + # then the output must be masked. If any input arrays are masked then + # output is masked. + masked = any(getattr(arr, 'masked', False) for arr in arrays) + for names in six.itervalues(col_name_map): + if any(x is None for x in names): + masked = True + break + + lens = [len(arr) for arr in arrays] + n_rows = sum(lens) + out = _get_out_class(arrays)(masked=masked) + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + col_cls = _get_out_class(cols) + if not hasattr(col_cls.info, 'new_like'): + raise NotImplementedError('vstack unavailable for mixin column type(s): {}' + .format(col_cls.__name__)) + try: + out[out_name] = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) + except metadata.MergeConflictError as err: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(out_name, err._incompat_types)) + + idx0 = 0 + for name, array in zip(in_names, arrays): + idx1 = idx0 + len(array) + if name in array.colnames: + out[out_name][idx0:idx1] = array[name] + else: + try: + out[out_name].mask[idx0:idx1] = True + except ValueError: + raise NotImplementedError( + "vstack requires masking column '{}' but column" + " type {} does not support masking" + .format(out_name, out[out_name].__class__.__name__)) + idx0 = idx1 + + # If col_name_map supplied as a dict input, then update. + if isinstance(_col_name_map, collections.Mapping): + _col_name_map.update(col_name_map) + + return out + + +def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}', + table_names=None, col_name_map=None): + """ + Stack tables horizontally (by columns) + + A ``join_type`` of 'exact' (default) means that the arrays must all + have exactly the same number of rows. If ``join_type`` is 'inner' then + the intersection of rows will be the output. 
A value of 'outer' means + the output will have the union of all rows, with array values being + masked where no common values are available. + + Parameters + ---------- + arrays : List of tables + Tables to stack by columns (horizontally) + join_type : str + Join type ('inner' | 'exact' | 'outer'), default is 'outer' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. + The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2', ..]. + + Returns + ------- + stacked_table : `~astropy.table.Table` object + New table containing the stacked data from the input tables. + """ + + # Store user-provided col_name_map until the end + _col_name_map = col_name_map + + # Input validation + if join_type not in ('inner', 'exact', 'outer'): + raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'") + + if table_names is None: + table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))] + if len(arrays) != len(table_names): + raise ValueError('Number of arrays must match number of table_names') + + # Trivial case of one input arrays + if len(arrays) == 1: + return arrays[0] + + col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names) + + # If require_match is True then all input arrays must have the same length + arr_lens = [len(arr) for arr in arrays] + if join_type == 'exact': + if len(set(arr_lens)) > 1: + raise TableMergeError("Inconsistent number of rows in input arrays " + "(use 'inner' or 'outer' join_type to allow " + "non-matching rows)") + join_type = 'outer' + + # For an inner join, keep only the common rows + if join_type == 'inner': + min_arr_len = min(arr_lens) + if len(set(arr_lens)) > 1: + arrays = [arr[:min_arr_len] for arr in arrays] + arr_lens = [min_arr_len for arr in arrays] + + # If there are any output rows where one or more input arrays are missing + # then the output must be masked. If any input arrays are masked then + # output is masked. + masked = any(getattr(arr, 'masked', False) for arr in arrays) or len(set(arr_lens)) > 1 + + n_rows = max(arr_lens) + out = _get_out_class(arrays)(masked=masked) + + for out_name, in_names in six.iteritems(col_name_map): + for name, array, arr_len in zip(in_names, arrays, arr_lens): + if name is None: + continue + + if n_rows > arr_len: + indices = np.arange(n_rows) + indices[arr_len:] = 0 + out[out_name] = array[name][indices] + try: + out[out_name].mask[arr_len:] = True + except ValueError: + raise NotImplementedError( + "hstack requires masking column '{}' but column" + " type {} does not support masking" + .format(out_name, out[out_name].__class__.__name__)) + else: + out[out_name] = array[name][:n_rows] + + # If col_name_map supplied as a dict input, then update. 
+ if isinstance(_col_name_map, collections.Mapping): + _col_name_map.update(col_name_map) + + return out diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46707b1684819e8697851558b1fcdee615f8c885 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/operations.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.py new file mode 100644 index 0000000000000000000000000000000000000000..f7c7632a8c19afdbb57513c2f63949f3e9f7ff75 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.py @@ -0,0 +1,713 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six import text_type +from ..extern.six.moves import zip, range + +import os +import sys +import re + +import numpy as np + +from .. import log +from ..utils.console import Getch, color_print, terminal_size, conf +from ..utils.data_info import dtype_info_name + +__all__ = [] + + +def default_format_func(format_, val): + if isinstance(val, bytes): + return val.decode('utf-8', errors='replace') + else: + return text_type(val) + + +# The first three functions are helpers for _auto_format_func + +def _use_str_for_masked_values(format_func): + """Wrap format function to trap masked values. + + String format functions and most user functions will not be able to deal + with masked values, so we wrap them to ensure they are passed to str(). + """ + return lambda format_, val: (str(val) if val is np.ma.masked + else format_func(format_, val)) + + +def _possible_string_format_functions(format_): + """Iterate through possible string-derived format functions. + + A string can either be a format specifier for the format built-in, + a new-style format string, or an old-style format string. + """ + yield lambda format_, val: format(val, format_) + yield lambda format_, val: format_.format(val) + yield lambda format_, val: format_ % val + + +def get_auto_format_func( + col=None, + possible_string_format_functions=_possible_string_format_functions): + """ + Return a wrapped ``auto_format_func`` function which is used in + formatting table columns. This is primarily an internal function but + gets used directly in other parts of astropy, e.g. `astropy.io.ascii`. + + Parameters + ---------- + col_name : object, optional + Hashable object to identify column like id or name. Default is None. + + possible_string_format_functions : func, optional + Function that yields possible string formatting functions + (defaults to internal function to do this). + + Returns + ------- + Wrapped ``auto_format_func`` function + """ + + def _auto_format_func(format_, val): + """Format ``val`` according to ``format_`` for a plain format specifier, + old- or new-style format strings, or using a user supplied function. + More importantly, determine and cache (in _format_funcs) a function + that will do this subsequently. In this way this complicated logic is + only done for the first value. + + Returns the formatted value. 
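        For reference, the three string styles tried by
        ``_possible_string_format_functions`` are equivalent to plain Python
        formatting (a standalone sketch, independent of any column)::

            >>> format(3.14159, '.2f'), '{:.2f}'.format(3.14159), '%.2f' % 3.14159
            ('3.14', '3.14', '3.14')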
+ """ + if format_ is None: + return default_format_func(format_, val) + + if format_ in col.info._format_funcs: + return col.info._format_funcs[format_](format_, val) + + if six.callable(format_): + format_func = lambda format_, val: format_(val) + try: + out = format_func(format_, val) + if not isinstance(out, six.string_types): + raise ValueError('Format function for value {0} returned {1} ' + 'instead of string type' + .format(val, type(val))) + except Exception as err: + # For a masked element, the format function call likely failed + # to handle it. Just return the string representation for now, + # and retry when a non-masked value comes along. + if val is np.ma.masked: + return str(val) + + raise ValueError('Format function for value {0} failed: {1}' + .format(val, err)) + # If the user-supplied function handles formatting masked elements, use + # it directly. Otherwise, wrap it in a function that traps them. + try: + format_func(format_, np.ma.masked) + except Exception: + format_func = _use_str_for_masked_values(format_func) + else: + # For a masked element, we cannot set string-based format functions yet, + # as all tests below will fail. Just return the string representation + # of masked for now, and retry when a non-masked value comes along. + if val is np.ma.masked: + return str(val) + + for format_func in possible_string_format_functions(format_): + try: + # Does this string format method work? + out = format_func(format_, val) + # Require that the format statement actually did something. + if out == format_: + raise ValueError('the format passed in did nothing.') + except Exception: + continue + else: + break + else: + # None of the possible string functions passed muster. + raise ValueError('Unable to parse format string {0}' + .format(format_)) + + # String-based format functions will fail on masked elements; + # wrap them in a function that traps them. + format_func = _use_str_for_masked_values(format_func) + + col.info._format_funcs[format_] = format_func + return out + + return _auto_format_func + + +class TableFormatter(object): + @staticmethod + def _get_pprint_size(max_lines=None, max_width=None): + """Get the output size (number of lines and character width) for Column and + Table pformat/pprint methods. + + If no value of ``max_lines`` is supplied then the height of the + screen terminal is used to set ``max_lines``. If the terminal + height cannot be determined then the default will be determined + using the ``astropy.table.conf.max_lines`` configuration item. If a + negative value of ``max_lines`` is supplied then there is no line + limit applied. + + The same applies for max_width except the configuration item is + ``astropy.table.conf.max_width``. 
+ + Parameters + ---------- + max_lines : int or None + Maximum lines of output (header + data rows) + + max_width : int or None + Maximum width (characters) output + + Returns + ------- + max_lines, max_width : int + + """ + if max_lines is None: + max_lines = conf.max_lines + + if max_width is None: + max_width = conf.max_width + + if max_lines is None or max_width is None: + lines, width = terminal_size() + + if max_lines is None: + max_lines = lines + elif max_lines < 0: + max_lines = sys.maxsize + if max_lines < 8: + max_lines = 8 + + if max_width is None: + max_width = width + elif max_width < 0: + max_width = sys.maxsize + if max_width < 10: + max_width = 10 + + return max_lines, max_width + + def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None, + show_dtype=False, show_length=None, html=False, align=None): + """Return a list of formatted string representation of column values. + + Parameters + ---------- + max_lines : int + Maximum lines of output (header + data rows) + + show_name : bool + Include column name. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include column dtype. Default is False. + + show_length : bool + Include column length at end. Default is to show this only + if the column is not shown completely. + + html : bool + Output column as HTML + + align : str + Left/right alignment of columns. Default is '>' (right) for all + columns. Other allowed values are '<', '^', and '0=' for left, + centered, and 0-padded, respectively. + + Returns + ------- + lines : list + List of lines with formatted column values + + outs : dict + Dict which is used to pass back additional values + defined within the iterator. + + """ + if show_unit is None: + show_unit = col.info.unit is not None + + outs = {} # Some values from _pformat_col_iter iterator that are needed here + col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name, + show_unit=show_unit, + show_dtype=show_dtype, + show_length=show_length, + outs=outs) + col_strs = list(col_strs_iter) + if len(col_strs) > 0: + col_width = max(len(x) for x in col_strs) + + if html: + from ..utils.xml.writer import xml_escape + n_header = outs['n_header'] + for i, col_str in enumerate(col_strs): + # _pformat_col output has a header line '----' which is not needed here + if i == n_header - 1: + continue + td = 'th' if i < n_header else 'td' + val = '<{0}>{1}</{2}>'.format(td, xml_escape(col_str.strip()), td) + row = ('<tr>' + val + '</tr>') + if i < n_header: + row = ('<thead>' + row + '</thead>') + col_strs[i] = row + + if n_header > 0: + # Get rid of '---' header line + col_strs.pop(n_header - 1) + col_strs.insert(0, '<table>') + col_strs.append('</table>
') + + # Now bring all the column string values to the same fixed width + else: + col_width = max(len(x) for x in col_strs) if col_strs else 1 + + # Center line header content and generate dashed headerline + for i in outs['i_centers']: + col_strs[i] = col_strs[i].center(col_width) + if outs['i_dashes'] is not None: + col_strs[outs['i_dashes']] = '-' * col_width + + # Format columns according to alignment. `align` arg takes precedence, otherwise + # use `col.format` if it starts as a legal alignment string. If neither applies + # then right justify. + re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])') + match = None + if align: + # If there is an align specified then it must match + match = re_fill_align.match(align) + if not match: + raise ValueError("column align must be one of '<', '^', '>', or '='") + elif isinstance(col.info.format, six.string_types): + # col.info.format need not match, in which case rjust gets used + match = re_fill_align.match(col.info.format) + + if match: + fill_char = match.group('fill') + align_char = match.group('align') + if align_char == '=': + if fill_char != '0': + raise ValueError("fill character must be '0' for '=' align") + fill_char = '' # str.zfill gets used which does not take fill char arg + else: + fill_char = '' + align_char = '>' + + justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'} + justify_method = justify_methods[align_char] + justify_args = (col_width, fill_char) if fill_char else (col_width,) + + for i, col_str in enumerate(col_strs): + col_strs[i] = getattr(col_str, justify_method)(*justify_args) + + if outs['show_length']: + col_strs.append('Length = {0} rows'.format(len(col))) + + return col_strs, outs + + def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs, + show_dtype=False, show_length=None): + """Iterator which yields formatted string representation of column values. + + Parameters + ---------- + max_lines : int + Maximum lines of output (header + data rows) + + show_name : bool + Include column name. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + outs : dict + Must be a dict which is used to pass back additional values + defined within the iterator. + + show_dtype : bool + Include column dtype. Default is False. + + show_length : bool + Include column length at end. Default is to show this only + if the column is not shown completely. 
+ """ + max_lines, _ = self._get_pprint_size(max_lines, -1) + + multidims = getattr(col, 'shape', [0])[1:] + if multidims: + multidim0 = tuple(0 for n in multidims) + multidim1 = tuple(n - 1 for n in multidims) + trivial_multidims = np.prod(multidims) == 1 + + i_dashes = None + i_centers = [] # Line indexes where content should be centered + n_header = 0 + if show_name: + i_centers.append(n_header) + # Get column name (or 'None' if not set) + col_name = six.text_type(col.info.name) + if multidims: + col_name += ' [{0}]'.format( + ','.join(six.text_type(n) for n in multidims)) + n_header += 1 + yield col_name + if show_unit: + i_centers.append(n_header) + n_header += 1 + yield six.text_type(col.info.unit or '') + if show_dtype: + i_centers.append(n_header) + n_header += 1 + try: + dtype = dtype_info_name(col.dtype) + except AttributeError: + dtype = 'object' + yield six.text_type(dtype) + if show_unit or show_name or show_dtype: + i_dashes = n_header + n_header += 1 + yield '---' + + max_lines -= n_header + n_print2 = max_lines // 2 + n_rows = len(col) + + # This block of code is responsible for producing the function that + # will format values for this column. The ``format_func`` function + # takes two args (col_format, val) and returns the string-formatted + # version. Some points to understand: + # + # - col_format could itself be the formatting function, so it will + # actually end up being called with itself as the first arg. In + # this case the function is expected to ignore its first arg. + # + # - auto_format_func is a function that gets called on the first + # column value that is being formatted. It then determines an + # appropriate formatting function given the actual value to be + # formatted. This might be deterministic or it might involve + # try/except. The latter allows for different string formatting + # options like %f or {:5.3f}. When auto_format_func is called it: + + # 1. Caches the function in the _format_funcs dict so for subsequent + # values the right function is called right away. + # 2. Returns the formatted value. + # + # - possible_string_format_functions is a function that yields a + # succession of functions that might successfully format the + # value. There is a default, but Mixin methods can override this. + # See Quantity for an example. + # + # - get_auto_format_func() returns a wrapped version of auto_format_func + # with the column id and possible_string_format_functions as + # enclosed variables. + col_format = col.info.format or getattr(col.info, 'default_format', None) + pssf = (getattr(col.info, 'possible_string_format_functions', None) or + _possible_string_format_functions) + auto_format_func = get_auto_format_func(col, pssf) + format_func = col.info._format_funcs.get(col_format, auto_format_func) + + if len(col) > max_lines: + if show_length is None: + show_length = True + i0 = n_print2 - (1 if show_length else 0) + i1 = n_rows - n_print2 - max_lines % 2 + ii = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]) + else: + i0 = -1 + ii = np.arange(len(col)) + + # Add formatted values if within bounds allowed by max_lines + for i in ii: + if i == i0: + yield '...' + else: + if multidims: + # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a') + # with shape (n,1,...,1) from being printed as if there was + # more than one element in a row + if trivial_multidims: + col_str = format_func(col_format, col[(i,) + multidim0]) + else: + col_str = (format_func(col_format, col[(i,) + multidim0]) + + ' .. 
' + + format_func(col_format, col[(i,) + multidim1])) + else: + col_str = format_func(col_format, col[i]) + yield col_str + + outs['show_length'] = show_length + outs['n_header'] = n_header + outs['i_centers'] = i_centers + outs['i_dashes'] = i_dashes + + def _pformat_table(self, table, max_lines=None, max_width=None, + show_name=True, show_unit=None, show_dtype=False, + html=False, tableid=None, tableclass=None, align=None): + """Return a list of lines for the formatted string representation of + the table. + + Parameters + ---------- + max_lines : int or None + Maximum number of rows to output + + max_width : int or None + Maximum character width of output + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is False. + + html : bool + Format the output as an HTML table. Default is False. + + tableid : str or None + An ID tag for the table; only used if html is set. Default is + "table{id}", where id is the unique integer id of the table object, + id(table) + + tableclass : str or list of str or `None` + CSS classes for the table; only used if html is set. Default is + none + + align : str or list or tuple + Left/right alignment of columns. Default is '>' (right) for all + columns. Other allowed values are '<', '^', and '0=' for left, + centered, and 0-padded, respectively. A list of strings can be + provided for alignment of tables with multiple columns. + + Returns + ------- + rows : list + Formatted table as a list of strings + + outs : dict + Dict which is used to pass back additional values + defined within the iterator. + + """ + # "Print" all the values into temporary lists by column for subsequent + # use and to determine the width + max_lines, max_width = self._get_pprint_size(max_lines, max_width) + cols = [] + + if show_unit is None: + show_unit = any(col.info.unit for col in six.itervalues(table.columns)) + + # Coerce align into a correctly-sized list of alignments (if possible) + n_cols = len(table.columns) + if align is None or isinstance(align, six.string_types): + align = [align] * n_cols + + elif isinstance(align, (list, tuple)): + if len(align) != n_cols: + raise ValueError('got {0} alignment values instead of ' + 'the number of columns ({1})' + .format(len(align), n_cols)) + else: + raise TypeError('align keyword must be str or list or tuple (got {0})' + .format(type(align))) + + for align_, col in zip(align, table.columns.values()): + lines, outs = self._pformat_col(col, max_lines, show_name=show_name, + show_unit=show_unit, show_dtype=show_dtype, + align=align_) + if outs['show_length']: + lines = lines[:-1] + cols.append(lines) + + if not cols: + return [''], {'show_length': False} + + # Use the values for the last column since they are all the same + n_header = outs['n_header'] + + n_rows = len(cols[0]) + outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1 + dots_col = ['...'] * n_rows + middle = len(cols) // 2 + while outwidth(cols) > max_width: + if len(cols) == 1: + break + if len(cols) == 2: + cols[1] = dots_col + break + if cols[middle] is dots_col: + cols.pop(middle) + middle = len(cols) // 2 + cols[middle] = dots_col + + # Now "print" the (already-stringified) column values into a + # row-oriented list. 
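A stdlib-only sketch of the width-squeezing strategy implemented above, where middle columns are collapsed into a single '...' column until the printed row fits (the toy column data and the width limit are made up for illustration):

    # Toy pre-formatted columns, one row each, all padded to width 6.
    cols = [['%6s' % ('c%d' % i)] for i in range(8)]
    max_width = 30
    outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1
    dots_col = ['...'] * len(cols[0])
    middle = len(cols) // 2
    while outwidth(cols) > max_width and len(cols) > 2:
        if cols[middle] is dots_col:
            # Already squeezed here once: drop it and retarget the new middle.
            cols.pop(middle)
            middle = len(cols) // 2
        cols[middle] = dots_col
    print(' '.join(col[0] for col in cols))   # e.g. "    c0     c1    ...     c7"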
+ rows = [] + if html: + from ..utils.xml.writer import xml_escape + + if tableid is None: + tableid = 'table{id}'.format(id=id(table)) + + if tableclass is not None: + if isinstance(tableclass, list): + tableclass = ' '.join(tableclass) + rows.append('<table id="{tid}" class="{tcls}">'.format( + tid=tableid, tcls=tableclass)) + else: + rows.append('<table id="{tid}">'.format(tid=tableid)) + + for i in range(n_rows): + # _pformat_col output has a header line '----' which is not needed here + if i == n_header - 1: + continue + td = 'th' if i < n_header else 'td' + vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td) + for col in cols) + row = ('<tr>' + ''.join(vals) + '</tr>') + if i < n_header: + row = ('<thead>' + row + '</thead>') + rows.append(row) + rows.append('</table>
') + else: + for i in range(n_rows): + row = ' '.join(col[i] for col in cols) + rows.append(row) + + return rows, outs + + def _more_tabcol(self, tabcol, max_lines=None, max_width=None, + show_name=True, show_unit=None, show_dtype=False): + """Interactive "more" of a table or column. + + Parameters + ---------- + max_lines : int or None + Maximum number of rows to output + + max_width : int or None + Maximum character width of output + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is False. + """ + allowed_keys = 'f br<>qhpn' + + # Count the header lines + n_header = 0 + if show_name: + n_header += 1 + if show_unit: + n_header += 1 + if show_dtype: + n_header += 1 + if show_name or show_unit or show_dtype: + n_header += 1 + + # Set up kwargs for pformat call. Only Table gets max_width. + kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit, + show_dtype=show_dtype) + if hasattr(tabcol, 'columns'): # tabcol is a table + kwargs['max_width'] = max_width + + # If max_lines is None (=> query screen size) then increase by 2. + # This is because get_pprint_size leaves 6 extra lines so that in + # ipython you normally see the last input line. + max_lines1, max_width = self._get_pprint_size(max_lines, max_width) + if max_lines is None: + max_lines1 += 2 + delta_lines = max_lines1 - n_header + + # Set up a function to get a single character on any platform + inkey = Getch() + + i0 = 0 # First table/column row to show + showlines = True + while True: + i1 = i0 + delta_lines # Last table/col row to show + if showlines: # Don't always show the table (e.g. after help) + try: + os.system('cls' if os.name == 'nt' else 'clear') + except Exception: + pass # No worries if clear screen call fails + lines = tabcol[i0:i1].pformat(**kwargs) + colors = ('red' if i < n_header else 'default' + for i in range(len(lines))) + for color, line in zip(colors, lines): + color_print(line, color) + showlines = True + print() + print("-- f, , b, r, p, n, <, >, q h (help) --", end=' ') + # Get a valid key + while True: + try: + key = inkey().lower() + except Exception: + print("\n") + log.error('Console does not support getting a character' + ' as required by more(). 
Use pprint() instead.') + return + if key in allowed_keys: + break + print(key) + + if key.lower() == 'q': + break + elif key == ' ' or key == 'f': + i0 += delta_lines + elif key == 'b': + i0 = i0 - delta_lines + elif key == 'r': + pass + elif key == '<': + i0 = 0 + elif key == '>': + i0 = len(tabcol) + elif key == 'p': + i0 -= 1 + elif key == 'n': + i0 += 1 + elif key == 'h': + showlines = False + print(""" + Browsing keys: + f, : forward one page + b : back one page + r : refresh same page + n : next row + p : previous row + < : go to beginning + > : go to end + q : quit browsing + h : print this help""", end=' ') + if i0 < 0: + i0 = 0 + if i0 >= len(tabcol) - delta_lines: + i0 = len(tabcol) - delta_lines + print("\n") diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c38a8052f82eead48117fd78b89d109344f3112 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/pprint.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.py new file mode 100644 index 0000000000000000000000000000000000000000..4783c4758e7f1b1319c5cbe156e1c783aa943c70 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.py @@ -0,0 +1,177 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import collections +import operator + +import numpy as np + +from ..extern import six + + +class Row(object): + """A class to represent one row of a Table object. + + A Row object is returned when a Table object is indexed with an integer + or when iterating over a table:: + + >>> from astropy.table import Table + >>> table = Table([(1, 2), (3, 4)], names=('a', 'b'), + ... dtype=('int32', 'int32')) + >>> row = table[1] + >>> row + + a b + int32 int32 + ----- ----- + 2 4 + >>> row['a'] + 2 + >>> row[1] + 4 + """ + + def __init__(self, table, index): + self._table = table + self._index = operator.index(index) + + n = len(table) + if index < -n or index >= n: + raise IndexError('index {0} out of range for table with length {1}' + .format(index, len(table))) + + def __getitem__(self, item): + return self._table.columns[item][self._index] + + def __setitem__(self, item, val): + self._table.columns[item][self._index] = val + + def __eq__(self, other): + if self._table.masked: + # Sent bug report to numpy-discussion group on 2012-Oct-21, subject: + # "Comparing rows in a structured masked array raises exception" + # No response, so this is still unresolved. + raise ValueError('Unable to compare rows for masked table due to numpy.ma bug') + return self.as_void() == other + + def __ne__(self, other): + if self._table.masked: + raise ValueError('Unable to compare rows for masked table due to numpy.ma bug') + return self.as_void() != other + + def __array__(self, dtype=None): + """Support converting Row to np.array via np.array(table). + + Coercion to a different dtype via np.array(table, dtype) is not + supported and will raise a ValueError. + + If the parent table is masked then the mask information is dropped. 
+ """ + if dtype is not None: + raise ValueError('Datatype coercion is not allowed') + + return np.asarray(self.as_void()) + + def __len__(self): + return len(self._table.columns) + + def __iter__(self): + index = self._index + for col in six.itervalues(self._table.columns): + yield col[index] + + @property + def table(self): + return self._table + + @property + def index(self): + return self._index + + def as_void(self): + """ + Returns a *read-only* copy of the row values in the form of np.void or + np.ma.mvoid objects. This corresponds to the object types returned for + row indexing of a pure numpy structured array or masked array. This + method is slow and its use is discouraged when possible. + + Returns + ------- + void_row : np.void (unmasked) or np.ma.mvoid (masked) + Copy of row values + """ + index = self._index + cols = self._table.columns.values() + vals = tuple(np.asarray(col)[index] for col in cols) + if self._table.masked: + # The logic here is a little complicated to work around + # bug in numpy < 1.8 (numpy/numpy#483). Need to build up + # a np.ma.mvoid object by hand. + from .table import descr + + # Make np.void version of masks. Use the table dtype but + # substitute bool for data type + masks = tuple(col.mask[index] if hasattr(col, 'mask') else False + for col in cols) + descrs = (descr(col) for col in cols) + mask_dtypes = [(name, np.bool, shape) for name, type_, shape in descrs] + row_mask = np.array([masks], dtype=mask_dtypes)[0] + + # Make np.void version of values, and then the final mvoid row + row_vals = np.array([vals], dtype=self.dtype)[0] + void_row = np.ma.mvoid(data=row_vals, mask=row_mask) + else: + void_row = np.array([vals], dtype=self.dtype)[0] + return void_row + + @property + def meta(self): + return self._table.meta + + @property + def columns(self): + return self._table.columns + + @property + def colnames(self): + return self._table.colnames + + @property + def dtype(self): + return self._table.dtype + + def _base_repr_(self, html=False): + """ + Display row as a single-line table but with appropriate header line. 
+ """ + index = self.index if (self.index >= 0) else self.index + len(self._table) + table = self._table[index:index + 1] + descr_vals = [self.__class__.__name__, + 'index={0}'.format(self.index)] + if table.masked: + descr_vals.append('masked=True') + + return table._base_repr_(html, descr_vals, max_width=-1, + tableid='table{0}'.format(id(self._table))) + + def _repr_html_(self): + return self._base_repr_(html=True) + + def __repr__(self): + return self._base_repr_(html=False) + + def __unicode__(self): + index = self.index if (self.index >= 0) else self.index + len(self._table) + return '\n'.join(self.table[index:index + 1].pformat(max_width=-1)) + if not six.PY2: + __str__ = __unicode__ + + def __bytes__(self): + return six.text_type(self).encode('utf-8') + if six.PY2: + __str__ = __bytes__ + + +collections.Sequence.register(Row) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a717cf633996b07af2265109b925f2f8aa9f61 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/row.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..a66c2ed1ef42fa13c9232e5435afc14a8eea76e7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.py @@ -0,0 +1,211 @@ +from importlib import import_module +import re +from copy import deepcopy + +from ..utils.data_info import MixinInfo +from .column import Column +from .table import Table, QTable, has_info_class +from ..units.quantity import QuantityInfo + + +__construct_mixin_classes = ('astropy.time.core.Time', + 'astropy.time.core.TimeDelta', + 'astropy.units.quantity.Quantity', + 'astropy.coordinates.angles.Latitude', + 'astropy.coordinates.angles.Longitude', + 'astropy.coordinates.angles.Angle', + 'astropy.coordinates.distances.Distance', + 'astropy.coordinates.earth.EarthLocation', + 'astropy.coordinates.sky_coordinate.SkyCoord', + 'astropy.table.table.NdarrayMixin') + + +class SerializedColumn(dict): + """ + Subclass of dict that is a used in the representation to contain the name + (and possible other info) for a mixin attribute (either primary data or an + array-like attribute) that is serialized as a column in the table. + + Normally contains the single key ``name`` with the name of the column in the + table. + """ + pass + + +def _represent_mixin_as_column(col, name, new_cols, mixin_cols): + """Convert a mixin column to a plain columns or a set of mixin columns.""" + if not has_info_class(col, MixinInfo): + new_cols.append(col) + return + + # Subtlety here is handling mixin info attributes. The basic list of such + # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. 
+ # - name: handled directly [DON'T store] + # - unit: DON'T store if this is a parent attribute + # - dtype: captured in plain Column if relevant [DON'T store] + # - format: possibly irrelevant but settable post-object creation [DO store] + # - description: DO store + # - meta: DO store + info = {} + for attr, nontrivial, xform in (('unit', lambda x: x not in (None, ''), str), + ('format', lambda x: x is not None, None), + ('description', lambda x: x is not None, None), + ('meta', lambda x: x, None)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + info[attr] = xform(col_attr) if xform else col_attr + + obj_attrs = col.info._represent_as_dict() + ordered_keys = col.info._represent_as_dict_attrs + + data_attrs = [key for key in ordered_keys if key in obj_attrs and + getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]] + + for data_attr in data_attrs: + data = obj_attrs[data_attr] + if len(data_attrs) == 1 and not has_info_class(data, MixinInfo): + # For one non-mixin attribute, we need only one serialized column. + # We can store info there, and keep the column name as is. + new_cols.append(Column(data, name=name, **info)) + obj_attrs[data_attr] = SerializedColumn({'name': name}) + # Remove attributes that are already on the serialized column. + for attr in info: + if attr in obj_attrs: + del obj_attrs[attr] + + else: + # New column name combines the old name and attribute + # (e.g. skycoord.ra, skycoord.dec). + new_name = name + '.' + data_attr + # TODO masking, MaskedColumn + if not has_info_class(data, MixinInfo): + new_cols.append(Column(data, name=new_name)) + obj_attrs[data_attr] = SerializedColumn({'name': new_name}) + else: + # recurse. This will define obj_attrs[new_name]. + _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) + obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name)) + + # Strip out from info any attributes defined by the parent + for attr in col.info.attrs_from_parent: + if attr in info: + del info[attr] + + if info: + obj_attrs['__info__'] = info + + # Store the fully qualified class name + obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__ + + mixin_cols[name] = obj_attrs + + +def _represent_mixins_as_columns(tbl): + """ + Convert any mixin columns to plain Column or MaskedColumn and + return a new table. + """ + if not tbl.has_mixin_columns: + return tbl + + mixin_cols = {} + + new_cols = [] + + for col in tbl.itercols(): + _represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols) + + meta = deepcopy(tbl.meta) + meta['__serialized_columns__'] = mixin_cols + out = Table(new_cols, meta=meta, copy=False) + + return out + + +def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info): + cls_full_name = obj_attrs.pop('__class__') + + # If this is a supported class then import the class and run + # the _construct_from_col method. Prevent accidentally running + # untrusted code by only importing known astropy classes. 
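A stdlib sketch of the whitelist-then-import pattern applied in the check below (the whitelist entry and function name here are illustrative, not the real astropy list):

    import re
    from importlib import import_module

    allowed = ('collections.OrderedDict',)

    def construct(cls_full_name):
        # Refuse anything not explicitly whitelisted before importing it.
        if cls_full_name not in allowed:
            raise ValueError('unsupported class for construct {}'.format(cls_full_name))
        mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
        return getattr(import_module(mod_name), cls_name)

    print(construct('collections.OrderedDict'))   # <class 'collections.OrderedDict'>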
+ if cls_full_name not in __construct_mixin_classes: + raise ValueError('unsupported class for construct {}'.format(cls_full_name)) + + mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups() + module = import_module(mod_name) + cls = getattr(module, cls_name) + for attr, value in info.items(): + if attr in cls.info.attrs_from_parent: + obj_attrs[attr] = value + mixin = cls.info._construct_from_dict(obj_attrs) + for attr, value in info.items(): + if attr not in obj_attrs: + setattr(mixin.info, attr, value) + return mixin + + +def _construct_mixin_from_columns(new_name, obj_attrs, out): + data_attrs_map = {} + for name, val in obj_attrs.items(): + if isinstance(val, SerializedColumn): + if 'name' in val: + data_attrs_map[val['name']] = name + else: + _construct_mixin_from_columns(name, val, out) + data_attrs_map[name] = name + + for name in data_attrs_map.values(): + del obj_attrs[name] + + # Get the index where to add new column + idx = min(out.colnames.index(name) for name in data_attrs_map) + + # Name is the column name in the table (e.g. "coord.ra") and + # data_attr is the object attribute name (e.g. "ra"). A different + # example would be a formatted time object that would have (e.g.) + # "time_col" and "value", respectively. + for name, data_attr in data_attrs_map.items(): + col = out[name] + obj_attrs[data_attr] = col + del out[name] + + info = obj_attrs.pop('__info__', {}) + if len(data_attrs_map) == 1: + # col is the first and only serialized column; in that case, use info + # stored on the column. + for attr, nontrivial in (('unit', lambda x: x not in (None, '')), + ('format', lambda x: x is not None), + ('description', lambda x: x is not None), + ('meta', lambda x: x)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + info[attr] = col_attr + + info['name'] = new_name + col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info) + out.add_column(col, index=idx) + + +def _construct_mixins_from_columns(tbl): + if '__serialized_columns__' not in tbl.meta: + return tbl + + # Don't know final output class but assume QTable so no columns get + # downgraded. + out = QTable(tbl, copy=False) + + mixin_cols = out.meta.pop('__serialized_columns__') + + for new_name, obj_attrs in mixin_cols.items(): + _construct_mixin_from_columns(new_name, obj_attrs, out) + + # If no quantity subclasses are in the output then output as Table. + # For instance ascii.read(file, format='ecsv') doesn't specify an + # output class and should return the minimal table class that + # represents the table file. 
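A hedged example of the behaviour this machinery supports, assuming the public API of the vendored copy matches upstream astropy: a Quantity column written to ECSV is serialized as plain columns plus metadata and comes back with its unit intact.

    import io
    import astropy.units as u
    from astropy.table import QTable

    t = QTable({'flux': [1.0, 2.0] * u.Jy})
    buf = io.StringIO()
    t.write(buf, format='ascii.ecsv')
    buf.seek(0)
    t2 = QTable.read(buf, format='ascii.ecsv')
    print(t2['flux'].unit)   # Jy -- the Quantity column survives the round trip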
+ has_quantities = any(isinstance(col.info, QuantityInfo) + for col in out.itercols()) + if not has_quantities: + out = Table(out, copy=False) + + return out diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a1038c3186b4477623a5090f39c4ff3fd2631b0 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/serialize.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..a807ae1e9e22bee7024f6b2a77484d0baa5f5b60 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.py @@ -0,0 +1,25 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import + +import os +from distutils.extension import Extension + +ROOT = os.path.relpath(os.path.dirname(__file__)) + + +def get_extensions(): + sources = ["_np_utils.pyx", "_column_mixins.pyx"] + include_dirs = ['numpy'] + + exts = [ + Extension(name='astropy.table.' + os.path.splitext(source)[0], + sources=[os.path.join(ROOT, source)], + include_dirs=include_dirs) + for source in sources + ] + + return exts + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b76d792734ba9694efed0c54fc2aabb0b32e85 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.py new file mode 100644 index 0000000000000000000000000000000000000000..7c58b03ac744f185317fd740e6a00d2ff5675b4d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.py @@ -0,0 +1,317 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +import numpy as np +from ..extern.six.moves import range, zip + + +def _searchsorted(array, val, side='left'): + ''' + Call np.searchsorted or use a custom binary + search if necessary. + ''' + if hasattr(array, 'searchsorted'): + return array.searchsorted(val, side=side) + # Python binary search + begin = 0 + end = len(array) + while begin < end: + mid = (begin + end) // 2 + if val > array[mid]: + begin = mid + 1 + elif val < array[mid]: + end = mid + elif side == 'right': + begin = mid + 1 + else: + end = mid + return begin + + +class SortedArray(object): + ''' + Implements a sorted array container using + a list of numpy arrays. 
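The side='left' / side='right' distinction that the pure-Python fallback above mirrors can be seen directly with numpy; find() and find_pos() below rely on exactly this pair of bounds:

    import numpy as np

    a = np.array([1, 2, 2, 2, 5])
    # side='left' gives the first position where 2 could be inserted,
    # side='right' gives the position just past the existing 2s;
    # together they bound the slice of matching entries.
    left = np.searchsorted(a, 2, side='left')    # 1
    right = np.searchsorted(a, 2, side='right')  # 4
    print(left, right, a[left:right])            # 1 4 [2 2 2]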
+ + Parameters + ---------- + data : Table + Sorted columns of the original table + row_index : Column object + Row numbers corresponding to data columns + unique : bool (defaults to False) + Whether the values of the index must be unique + ''' + + def __init__(self, data, row_index, unique=False): + self.data = data + self.row_index = row_index + self.num_cols = len(getattr(data, 'colnames', [])) + self.unique = unique + + @property + def cols(self): + return self.data.columns.values() + + def add(self, key, row): + ''' + Add a new entry to the sorted array. + + Parameters + ---------- + key : tuple + Column values at the given row + row : int + Row number + ''' + pos = self.find_pos(key, row) # first >= key + + if self.unique and 0 <= pos < len(self.row_index) and \ + all(self.data[pos][i] == key[i] for i in range(len(key))): + # already exists + raise ValueError('Cannot add duplicate value "{0}" in a ' + 'unique index'.format(key)) + self.data.insert_row(pos, key) + self.row_index = self.row_index.insert(pos, row) + + def _get_key_slice(self, i, begin, end): + ''' + Retrieve the ith slice of the sorted array + from begin to end. + ''' + if i < self.num_cols: + return self.cols[i][begin:end] + else: + return self.row_index[begin:end] + + def find_pos(self, key, data, exact=False): + ''' + Return the index of the largest key in data greater than or + equal to the given key, data pair. + + Parameters + ---------- + key : tuple + Column key + data : int + Row number + exact : bool + If True, return the index of the given key in data + or -1 if the key is not present. + ''' + begin = 0 + end = len(self.row_index) + num_cols = self.num_cols + if not self.unique: + # consider the row value as well + key = key + (data,) + num_cols += 1 + + # search through keys in lexicographic order + for i in range(num_cols): + key_slice = self._get_key_slice(i, begin, end) + t = _searchsorted(key_slice, key[i]) + # t is the smallest index >= key[i] + if exact and (t == len(key_slice) or key_slice[t] != key[i]): + # no match + return -1 + elif t == len(key_slice) or (t == 0 and len(key_slice) > 0 and + key[i] < key_slice[0]): + # too small or too large + return begin + t + end = begin + _searchsorted(key_slice, key[i], side='right') + begin += t + if begin >= len(self.row_index): # greater than all keys + return begin + + return begin + + def find(self, key): + ''' + Find all rows matching the given key. + + Parameters + ---------- + key : tuple + Column values + + Returns + ------- + matching_rows : list + List of rows matching the input key + ''' + begin = 0 + end = len(self.row_index) + + # search through keys in lexicographic order + for i in range(self.num_cols): + key_slice = self._get_key_slice(i, begin, end) + t = _searchsorted(key_slice, key[i]) + # t is the smallest index >= key[i] + if t == len(key_slice) or key_slice[t] != key[i]: + # no match + return [] + elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]: + # too small or too large + return [] + end = begin + _searchsorted(key_slice, key[i], side='right') + begin += t + if begin >= len(self.row_index): # greater than all keys + return [] + + return self.row_index[begin:end] + + def range(self, lower, upper, bounds): + ''' + Find values in the given range. + + Parameters + ---------- + lower : tuple + Lower search bound + upper : tuple + Upper search bound + bounds : tuple (x, y) of bools + Indicates whether the search should be inclusive or + exclusive with respect to the endpoints. 
The first + argument x corresponds to an inclusive lower bound, + and the second argument y to an inclusive upper bound. + ''' + lower_pos = self.find_pos(lower, 0) + upper_pos = self.find_pos(upper, 0) + if lower_pos == len(self.row_index): + return [] + + lower_bound = tuple([col[lower_pos] for col in self.cols]) + if not bounds[0] and lower_bound == lower: + lower_pos += 1 # data[lower_pos] > lower + + # data[lower_pos] >= lower + # data[upper_pos] >= upper + if upper_pos < len(self.row_index): + upper_bound = tuple([col[upper_pos] for col in self.cols]) + if not bounds[1] and upper_bound == upper: + upper_pos -= 1 # data[upper_pos] < upper + elif upper_bound > upper: + upper_pos -= 1 # data[upper_pos] <= upper + return self.row_index[lower_pos:upper_pos + 1] + + def remove(self, key, data): + ''' + Remove the given entry from the sorted array. + + Parameters + ---------- + key : tuple + Column values + data : int + Row number + + Returns + ------- + successful : bool + Whether the entry was successfully removed + ''' + pos = self.find_pos(key, data, exact=True) + if pos == -1: # key not found + return False + + self.data.remove_row(pos) + keep_mask = np.ones(len(self.row_index), dtype=np.bool) + keep_mask[pos] = False + self.row_index = self.row_index[keep_mask] + return True + + def shift_left(self, row): + ''' + Decrement all row numbers greater than the input row. + + Parameters + ---------- + row : int + Input row number + ''' + self.row_index[self.row_index > row] -= 1 + + def shift_right(self, row): + ''' + Increment all row numbers greater than or equal to the input row. + + Parameters + ---------- + row : int + Input row number + ''' + self.row_index[self.row_index >= row] += 1 + + def replace_rows(self, row_map): + ''' + Replace all rows with the values they map to in the + given dictionary. Any rows not present as keys in + the dictionary will have their entries deleted. + + Parameters + ---------- + row_map : dict + Mapping of row numbers to new row numbers + ''' + num_rows = len(row_map) + keep_rows = np.zeros(len(self.row_index), dtype=np.bool) + tagged = 0 + for i, row in enumerate(self.row_index): + if row in row_map: + keep_rows[i] = True + tagged += 1 + if tagged == num_rows: + break + + self.data = self.data[keep_rows] + self.row_index = np.array( + [row_map[x] for x in self.row_index[keep_rows]]) + + def items(self): + ''' + Retrieve all array items as a list of pairs of the form + [(key, [row 1, row 2, ...]), ...] + ''' + array = [] + last_key = None + for i, key in enumerate(zip(*self.data.columns.values())): + row = self.row_index[i] + if key == last_key: + array[-1][1].append(row) + else: + last_key = key + array.append((key, [row])) + return array + + def sort(self): + ''' + Make row order align with key order. + ''' + self.row_index = np.arange(len(self.row_index)) + + def sorted_data(self): + ''' + Return rows in sorted order. + ''' + return self.row_index + + def __getitem__(self, item): + ''' + Return a sliced reference to this sorted array. 
+ + Parameters + ---------- + item : slice + Slice to use for referencing + ''' + return SortedArray(self.data[item], self.row_index[item]) + + def __repr__(self): + t = self.data.copy() + t['rows'] = self.row_index + return str(t) + + def __str__(self): + return repr(self) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6256d79923d9a9d0b41eeda92e92f0cae617cca9 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/sorted_array.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.py new file mode 100644 index 0000000000000000000000000000000000000000..fd14c907db2d8e3a6ee1c0d0f7872c6f0e4c449f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.py @@ -0,0 +1,2857 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range +from .index import TableIndices, TableLoc, TableILoc + +import re +import sys +from collections import OrderedDict, Mapping +import warnings +from copy import deepcopy + +import numpy as np +from numpy import ma + +from .. import log +from ..io import registry as io_registry +from ..units import Quantity, QuantityInfo +from ..utils import isiterable, ShapedLikeNDArray +from ..utils.compat.numpy import broadcast_to as np_broadcast_to +from ..utils.console import color_print +from ..utils.metadata import MetaData +from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo +from . import groups +from .pprint import TableFormatter +from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray, + col_copy) +from .row import Row +from .np_utils import fix_column_name, recarray_fromrecords +from .info import TableInfo +from .index import Index, _IndexModeContext, get_index +from . import conf + + +__doctest_skip__ = ['Table.read', 'Table.write', + 'Table.convert_bytestring_to_unicode', + 'Table.convert_unicode_to_bytestring', + ] + + +class TableReplaceWarning(UserWarning): + """ + Warning class for cases when a table column is replaced via the + Table.__setitem__ syntax e.g. t['a'] = val. + + This does not inherit from AstropyWarning because we want to use + stacklevel=3 to show the user where the issue occurred in their code. + """ + pass + + +def descr(col): + """Array-interface compliant full description of a column. + + This returns a 3-tuple (name, type, shape) that can always be + used in a structured array dtype definition. + """ + col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype + col_shape = col.shape[1:] if hasattr(col, 'shape') else () + return (col.info.name, col_dtype, col_shape) + + +def has_info_class(obj, cls): + return hasattr(obj, 'info') and isinstance(obj.info, cls) + + +class TableColumns(OrderedDict): + """OrderedDict subclass for a set of columns. + + This class enhances item access to provide convenient access to columns + by name or index, including slice access. It also handles renaming + of columns. 
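A small usage sketch of the access styles this container supports, via the public Table.columns attribute (assuming upstream astropy behaviour; the column data are made up):

    from astropy.table import Table

    t = Table({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
    print(t.columns['a'])     # access by name
    print(t.columns[1])       # access by position
    print(t.columns[0:2])     # slice returns a new TableColumns with 'a' and 'b'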
+ + The initialization argument ``cols`` can be a list of ``Column`` objects + or any structure that is valid for initializing a Python dict. This + includes a dict, list of (key, val) tuples or [key, val] lists, etc. + + Parameters + ---------- + cols : dict, list, tuple; optional + Column objects as data structure that can init dict (see above) + """ + + def __init__(self, cols={}): + if isinstance(cols, (list, tuple)): + # `cols` should be a list of two-tuples, but it is allowed to have + # columns (BaseColumn or mixins) in the list. + newcols = [] + for col in cols: + if has_info_class(col, BaseColumnInfo): + newcols.append((col.info.name, col)) + else: + newcols.append(col) + cols = newcols + super(TableColumns, self).__init__(cols) + + def __getitem__(self, item): + """Get items from a TableColumns object. + :: + + tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')]) + tc['a'] # Column('a') + tc[1] # Column('b') + tc['a', 'b'] # + tc[1:3] # + """ + if isinstance(item, six.string_types): + return OrderedDict.__getitem__(self, item) + elif isinstance(item, (int, np.integer)): + return self.values()[item] + elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'): + return self.values()[item.item()] + elif isinstance(item, tuple): + return self.__class__([self[x] for x in item]) + elif isinstance(item, slice): + return self.__class__([self[x] for x in list(self)[item]]) + else: + raise IndexError('Illegal key or index value for {} object' + .format(self.__class__.__name__)) + + def __setitem__(self, item, value): + if item in self: + raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead." + .format(item)) + super(TableColumns, self).__setitem__(item, value) + + def __repr__(self): + names = ("'{0}'".format(x) for x in six.iterkeys(self)) + return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__) + + def _rename_column(self, name, new_name): + if name == new_name: + return + + if new_name in self: + raise KeyError("Column {0} already exists".format(new_name)) + + mapper = {name: new_name} + new_names = [mapper.get(name, name) for name in self] + cols = list(six.itervalues(self)) + self.clear() + self.update(list(zip(new_names, cols))) + + # Define keys and values for Python 2 and 3 source compatibility + def keys(self): + return list(OrderedDict.keys(self)) + + def values(self): + return list(OrderedDict.values(self)) + + def isinstance(self, cls): + """ + Return a list of columns which are instances of the specified classes. + + Parameters + ---------- + cls : class or tuple of classes + Column class (including mixin) or tuple of Column classes. + + Returns + ------- + col_list : list of Columns + List of Column objects which are instances of given classes. + """ + cols = [col for col in self.values() if isinstance(col, cls)] + return cols + + def not_isinstance(self, cls): + """ + Return a list of columns which are not instances of the specified classes. + + Parameters + ---------- + cls : class or tuple of classes + Column class (including mixin) or tuple of Column classes. + + Returns + ------- + col_list : list of Columns + List of Column objects which are not instances of given classes. + """ + cols = [col for col in self.values() if not isinstance(col, cls)] + return cols + + +class Table(object): + """A class to represent tables of heterogeneous data. 
+ + `Table` provides a class for heterogeneous tabular data, making use of a + `numpy` structured array internally to store the data values. A key + enhancement provided by the `Table` class is the ability to easily modify + the structure of the table by adding or removing columns, or adding new + rows of data. In addition table and column metadata are fully supported. + + `Table` differs from `~astropy.nddata.NDData` by the assumption that the + input data consists of columns of homogeneous data, where each column + has a unique identifier and may contain additional metadata such as the + data unit, format, and description. + + Parameters + ---------- + data : numpy ndarray, dict, list, Table, or table-like object, optional + Data to initialize table. + masked : bool, optional + Specify whether the table is masked. + names : list, optional + Specify column names. + dtype : list, optional + Specify column data types. + meta : dict, optional + Metadata associated with the table. + copy : bool, optional + Copy the input data. If the input is a Table the ``meta`` is always + copied regardless of the ``copy`` parameter. + Default is True. + rows : numpy ndarray, list of lists, optional + Row-oriented data for table instead of ``data`` argument. + copy_indices : bool, optional + Copy any indices in the input data. Default is True. + **kwargs : dict, optional + Additional keyword args when converting table-like object. + """ + + meta = MetaData() + + # Define class attributes for core container objects to allow for subclass + # customization. + Row = Row + Column = Column + MaskedColumn = MaskedColumn + TableColumns = TableColumns + TableFormatter = TableFormatter + + def as_array(self, keep_byteorder=False): + """ + Return a new copy of the table in the form of a structured np.ndarray or + np.ma.MaskedArray object (as appropriate). + + Parameters + ---------- + keep_byteorder : bool, optional + By default the returned array has all columns in native byte + order. However, if this option is `True` this preserves the + byte order of all columns (if any are non-native). 
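What "native byte order" means for keep_byteorder can be sketched with plain numpy; newbyteorder('=') is the call used below to normalise non-native columns:

    import numpy as np

    big = np.dtype('>i4')            # big-endian 32-bit int
    native = big.newbyteorder('=')   # same kind and size, native byte order
    col = np.array([1, 2, 3], dtype=big)
    print(big.byteorder, native.byteorder)
    print(col.astype(native))        # values unchanged, storage now native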
+ + Returns + ------- + table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked) + Copy of table as a numpy structured array + """ + if len(self.columns) == 0: + return None + + sys_byteorder = ('>', '<')[sys.byteorder == 'little'] + native_order = ('=', sys_byteorder) + + dtype = [] + + cols = self.columns.values() + + for col in cols: + col_descr = descr(col) + byteorder = col.info.dtype.byteorder + + if not keep_byteorder and byteorder not in native_order: + new_dt = np.dtype(col_descr[1]).newbyteorder('=') + col_descr = (col_descr[0], new_dt, col_descr[2]) + + dtype.append(col_descr) + + empty_init = ma.empty if self.masked else np.empty + data = empty_init(len(self), dtype=dtype) + for col in cols: + # When assigning from one array into a field of a structured array, + # Numpy will automatically swap those columns to their destination + # byte order where applicable + data[col.info.name] = col + + return data + + def __init__(self, data=None, masked=None, names=None, dtype=None, + meta=None, copy=True, rows=None, copy_indices=True, + **kwargs): + + # Set up a placeholder empty table + self._set_masked(masked) + self.columns = self.TableColumns() + self.meta = meta + self.formatter = self.TableFormatter() + self._copy_indices = True # copy indices from this Table by default + self._init_indices = copy_indices # whether to copy indices in init + self.primary_key = None + + # Must copy if dtype are changing + if not copy and dtype is not None: + raise ValueError('Cannot specify dtype when copy=False') + + # Row-oriented input, e.g. list of lists or list of tuples, list of + # dict, Row instance. Set data to something that the subsequent code + # will parse correctly. + is_list_of_dict = False + if rows is not None: + if data is not None: + raise ValueError('Cannot supply both `data` and `rows` values') + if all(isinstance(row, dict) for row in rows): + is_list_of_dict = True # Avoid doing the all(...) test twice. + data = rows + elif isinstance(rows, self.Row): + data = rows + else: + rec_data = recarray_fromrecords(rows) + data = [rec_data[name] for name in rec_data.dtype.names] + + # Infer the type of the input data and set up the initialization + # function, number of columns, and potentially the default col names + + default_names = None + + if hasattr(data, '__astropy_table__'): + # Data object implements the __astropy_table__ interface method. + # Calling that method returns an appropriate instance of + # self.__class__ and respects the `copy` arg. The returned + # Table object should NOT then be copied (though the meta + # will be deep-copied anyway). 
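A minimal sketch of the __astropy_table__ protocol described in the comment above; the MyData class and its payload are made up for illustration:

    from astropy.table import Table

    class MyData(object):
        """Toy container that knows how to turn itself into a Table."""
        def __init__(self):
            self.payload = {'a': [1, 2], 'b': [3.0, 4.0]}

        def __astropy_table__(self, cls, copy, **kwargs):
            # cls is the Table (sub)class being constructed; honour it.
            return cls(self.payload, copy=copy)

    t = Table(MyData())
    print(t.colnames)   # ['a', 'b']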
+ data = data.__astropy_table__(self.__class__, copy, **kwargs) + copy = False + elif kwargs: + raise TypeError('__init__() got unexpected keyword argument {!r}' + .format(list(kwargs.keys())[0])) + + if (isinstance(data, np.ndarray) and + data.shape == (0,) and + not data.dtype.names): + data = None + + if isinstance(data, self.Row): + data = data._table[data._index:data._index + 1] + + if isinstance(data, (list, tuple)): + init_func = self._init_from_list + if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)): + n_cols = len(data[0]) + else: + n_cols = len(data) + + elif isinstance(data, np.ndarray): + if data.dtype.names: + init_func = self._init_from_ndarray # _struct + n_cols = len(data.dtype.names) + default_names = data.dtype.names + else: + init_func = self._init_from_ndarray # _homog + if data.shape == (): + raise ValueError('Can not initialize a Table with a scalar') + elif len(data.shape) == 1: + data = data[np.newaxis, :] + n_cols = data.shape[1] + + elif isinstance(data, Mapping): + init_func = self._init_from_dict + default_names = list(data) + n_cols = len(default_names) + + elif isinstance(data, Table): + init_func = self._init_from_table + n_cols = len(data.colnames) + default_names = data.colnames + # don't copy indices if the input Table is in non-copy mode + self._init_indices = self._init_indices and data._copy_indices + + elif data is None: + if names is None: + if dtype is None: + return # Empty table + try: + # No data nor names but dtype is available. This must be + # valid to initialize a structured array. + dtype = np.dtype(dtype) + names = dtype.names + dtype = [dtype[name] for name in names] + except Exception: + raise ValueError('dtype was specified but could not be ' + 'parsed for column names') + # names is guaranteed to be set at this point + init_func = self._init_from_list + n_cols = len(names) + data = [[]] * n_cols + + else: + raise ValueError('Data type {0} not allowed to init Table' + .format(type(data))) + + # Set up defaults if names and/or dtype are not specified. + # A value of None means the actual value will be inferred + # within the appropriate initialization routine, either from + # existing specification or auto-generated. + + if names is None: + names = default_names or [None] * n_cols + if dtype is None: + dtype = [None] * n_cols + + # Numpy does not support Unicode column names on Python 2, or + # bytes column names on Python 3, so fix them up now. + names = [fix_column_name(name) for name in names] + + self._check_names_dtype(names, dtype, n_cols) + + # Finally do the real initialization + init_func(data, names, dtype, n_cols, copy) + + # Whatever happens above, the masked property should be set to a boolean + if type(self.masked) is not bool: + raise TypeError("masked property has not been set to True or False") + + def __getstate__(self): + columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col)) + for key, col in self.columns.items()) + return (columns, self.meta) + + def __setstate__(self, state): + columns, meta = state + self.__init__(columns, meta=meta) + + @property + def mask(self): + # Dynamic view of available masks + if self.masked: + mask_table = Table([col.mask for col in self.columns.values()], + names=self.colnames, copy=False) + + # Set hidden attribute to force inplace setitem so that code like + # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask. + # See #5556 for discussion. 
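A short usage sketch of the dynamic mask view described above, assuming the public API behaves like upstream astropy:

    from astropy.table import Table

    t = Table({'a': [1, 2, 3], 'b': [4, 5, 6]}, masked=True)
    t.mask['a'] = [True, False, True]   # mask the first and last values of 'a'
    print(t)                            # masked cells print as '--'
    print(t['a'].mask)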
+ mask_table._setitem_inplace = True + else: + mask_table = None + + return mask_table + + @mask.setter + def mask(self, val): + self.mask[:] = val + + @property + def _mask(self): + """This is needed so that comparison of a masked Table and a + MaskedArray works. The requirement comes from numpy.ma.core + so don't remove this property.""" + return self.as_array().mask + + def filled(self, fill_value=None): + """Return a copy of self, with masked values filled. + + If input ``fill_value`` supplied then that value is used for all + masked entries in the table. Otherwise the individual + ``fill_value`` defined for each table column is used. + + Parameters + ---------- + fill_value : str + If supplied, this ``fill_value`` is used for all masked entries + in the entire table. + + Returns + ------- + filled_table : Table + New table with masked values filled + """ + if self.masked: + data = [col.filled(fill_value) for col in six.itervalues(self.columns)] + else: + data = self + return self.__class__(data, meta=deepcopy(self.meta)) + + @property + def indices(self): + ''' + Return the indices associated with columns of the table + as a TableIndices object. + ''' + lst = [] + for column in self.columns.values(): + for index in column.info.indices: + if sum([index is x for x in lst]) == 0: # ensure uniqueness + lst.append(index) + return TableIndices(lst) + + @property + def loc(self): + ''' + Return a TableLoc object that can be used for retrieving + rows by index in a given data range. Note that both loc + and iloc work only with single-column indices. + ''' + return TableLoc(self) + + @property + def iloc(self): + ''' + Return a TableILoc object that can be used for retrieving + indexed rows in the order they appear in the index. + ''' + return TableILoc(self) + + def add_index(self, colnames, engine=None, unique=False): + ''' + Insert a new index among one or more columns. + If there are no indices, make this index the + primary table index. + + Parameters + ---------- + colnames : str or list + List of column names (or a single column name) to index + engine : type or None + Indexing engine class to use, from among SortedArray, BST, + FastBST, and FastRBT. If the supplied argument is None (by + default), use SortedArray. + unique : bool + Whether the values of the index must be unique. Default is False. + ''' + if isinstance(colnames, six.string_types): + colnames = (colnames,) + columns = self.columns[tuple(colnames)].values() + + # make sure all columns support indexing + for col in columns: + if not getattr(col.info, '_supports_indexing', False): + raise ValueError('Cannot create an index on column "{0}", of ' + 'type "{1}"'.format(col.info.name, type(col))) + + index = Index(columns, engine=engine, unique=unique) + if not self.indices: + self.primary_key = colnames + for col in columns: + col.info.indices.append(index) + + def remove_indices(self, colname): + ''' + Remove all indices involving the given column. + If the primary index is removed, the new primary + index will be the most recently added remaining + index. + + Parameters + ---------- + colname : str + Name of column + ''' + col = self.columns[colname] + for index in self.indices: + try: + index.col_position(col.info.name) + except ValueError: + pass + else: + for c in index.columns: + c.info.indices.remove(index) + + def index_mode(self, mode): + ''' + Return a context manager for an indexing mode. + + Parameters + ---------- + mode : str + Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. 
+ In 'discard_on_copy' mode, + indices are not copied whenever columns or tables are copied. + In 'freeze' mode, indices are not modified whenever columns are + modified; at the exit of the context, indices refresh themselves + based on column values. This mode is intended for scenarios in + which one intends to make many additions or modifications in an + indexed column. + In 'copy_on_getitem' mode, indices are copied when taking column + slices as well as table slices, so col[i0:i1] will preserve + indices. + ''' + return _IndexModeContext(self, mode) + + def __array__(self, dtype=None): + """Support converting Table to np.array via np.array(table). + + Coercion to a different dtype via np.array(table, dtype) is not + supported and will raise a ValueError. + """ + if dtype is not None: + raise ValueError('Datatype coercion is not allowed') + + # This limitation is because of the following unexpected result that + # should have made a table copy while changing the column names. + # + # >>> d = astropy.table.Table([[1,2],[3,4]]) + # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')]) + # array([(0, 0), (0, 0)], + # dtype=[('a', ' 1 and + not self._add_as_mixin_column(col)): + col = col.view(NdarrayMixin) + + if isinstance(col, (Column, MaskedColumn)): + col = self.ColumnClass(name=(name or col.info.name or def_name), + data=col, dtype=dtype, + copy=copy, copy_indices=self._init_indices) + elif self._add_as_mixin_column(col): + # Copy the mixin column attributes if they exist since the copy below + # may not get this attribute. + if copy: + col = col_copy(col, copy_indices=self._init_indices) + + col.info.name = name or col.info.name or def_name + elif isinstance(col, np.ndarray) or isiterable(col): + col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype, + copy=copy, copy_indices=self._init_indices) + else: + raise ValueError('Elements in list initialization must be ' + 'either Column or list-like') + + cols.append(col) + + self._init_from_cols(cols) + + def _init_from_ndarray(self, data, names, dtype, n_cols, copy): + """Initialize table from an ndarray structured array""" + + data_names = data.dtype.names or _auto_names(n_cols) + struct = data.dtype.names is not None + names = [name or data_names[i] for i, name in enumerate(names)] + + cols = ([data[name] for name in data_names] if struct else + [data[:, i] for i in range(n_cols)]) + + # Set self.masked appropriately, then get class to create column instances. + self._set_masked_from_cols(cols) + + if copy: + self._init_from_list(cols, names, dtype, n_cols, copy) + else: + dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)] + newdata = data.view(dtype).ravel() + columns = self.TableColumns() + + for name in names: + columns[name] = self.ColumnClass(name=name, data=newdata[name]) + columns[name].info.parent_table = self + self.columns = columns + + def _init_from_dict(self, data, names, dtype, n_cols, copy): + """Initialize table from a dictionary of columns""" + + # TODO: is this restriction still needed with no ndarray? 
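+ # Illustrative call path: ``Table({'a': [1, 2], 'b': [3., 4.]})`` reaches
+ # this method with ``names`` taken from the dict keys and the default
+ # ``copy=True``; only the explicit ``copy=False`` case is rejected below.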
+ if not copy: + raise ValueError('Cannot use copy=False with a dict data input') + + data_list = [data[name] for name in names] + self._init_from_list(data_list, names, dtype, n_cols, copy) + + def _init_from_table(self, data, names, dtype, n_cols, copy): + """Initialize table from an existing Table object """ + + table = data # data is really a Table, rename for clarity + self.meta.clear() + self.meta.update(deepcopy(table.meta)) + self.primary_key = table.primary_key + cols = list(table.columns.values()) + + self._init_from_list(cols, names, dtype, n_cols, copy) + + def _convert_col_for_table(self, col): + """ + Make sure that all Column objects have correct class for this type of + Table. For a base Table this most commonly means setting to + MaskedColumn if the table is masked. Table subclasses like QTable + override this method. + """ + if col.__class__ is not self.ColumnClass and isinstance(col, Column): + col = self.ColumnClass(col) # copy attributes and reference data + return col + + def _init_from_cols(self, cols): + """Initialize table from a list of Column or mixin objects""" + + lengths = set(len(col) for col in cols) + if len(lengths) != 1: + raise ValueError('Inconsistent data column lengths: {0}' + .format(lengths)) + + # Set the table masking + self._set_masked_from_cols(cols) + + # Make sure that all Column-based objects have correct class. For + # plain Table this is self.ColumnClass, but for instance QTable will + # convert columns with units to a Quantity mixin. + newcols = [self._convert_col_for_table(col) for col in cols] + self._make_table_from_cols(self, newcols) + + # Deduplicate indices. It may happen that after pickling or when + # initing from an existing table that column indices which had been + # references to a single index object got *copied* into an independent + # object. This results in duplicates which will cause downstream problems. + index_dict = {} + for col in self.itercols(): + for i, index in enumerate(col.info.indices or []): + names = tuple(ind_col.info.name for ind_col in index.columns) + if names in index_dict: + col.info.indices[i] = index_dict[names] + else: + index_dict[names] = index + + def _new_from_slice(self, slice_): + """Create a new table as a referenced slice from self.""" + + table = self.__class__(masked=self.masked) + table.meta.clear() + table.meta.update(deepcopy(self.meta)) + table.primary_key = self.primary_key + cols = self.columns.values() + + newcols = [] + for col in cols: + col.info._copy_indices = self._copy_indices + newcol = col[slice_] + if col.info.indices: + newcol = col.info.slice_indices(newcol, slice_, len(col)) + newcols.append(newcol) + col.info._copy_indices = True + + self._make_table_from_cols(table, newcols) + return table + + @staticmethod + def _make_table_from_cols(table, cols): + """ + Make ``table`` in-place so that it represents the given list of ``cols``. + """ + colnames = set(col.info.name for col in cols) + if None in colnames: + raise TypeError('Cannot have None for column name') + if len(colnames) != len(cols): + raise ValueError('Duplicate column names') + + columns = table.TableColumns((col.info.name, col) for col in cols) + + for col in cols: + col.info.parent_table = table + if table.masked and not hasattr(col, 'mask'): + col.mask = FalseArray(col.shape) + + table.columns = columns + + def itercols(self): + """ + Iterate over the columns of this table. + + Examples + -------- + + To iterate over the columns of a table:: + + >>> t = Table([[1], [2]]) + >>> for col in t.itercols(): + ... 
print(col) + col0 + ---- + 1 + col1 + ---- + 2 + + Using ``itercols()`` is similar to ``for col in t.columns.values()`` + but is syntactically preferred. + """ + for colname in self.columns: + yield self[colname] + + def _base_repr_(self, html=False, descr_vals=None, max_width=None, + tableid=None, show_dtype=True, max_lines=None, + tableclass=None): + if descr_vals is None: + descr_vals = [self.__class__.__name__] + if self.masked: + descr_vals.append('masked=True') + descr_vals.append('length={0}'.format(len(self))) + + descr = '<' + ' '.join(descr_vals) + '>\n' + + if html: + from ..utils.xml.writer import xml_escape + descr = xml_escape(descr) + + if tableid is None: + tableid = 'table{id}'.format(id=id(self)) + + data_lines, outs = self.formatter._pformat_table( + self, tableid=tableid, html=html, max_width=max_width, + show_name=True, show_unit=None, show_dtype=show_dtype, + max_lines=max_lines, tableclass=tableclass) + + out = descr + '\n'.join(data_lines) + if six.PY2 and isinstance(out, six.text_type): + out = out.encode('utf-8') + + return out + + def _repr_html_(self): + return self._base_repr_(html=True, max_width=-1, + tableclass=conf.default_notebook_table_class) + + def __repr__(self): + return self._base_repr_(html=False, max_width=None) + + def __unicode__(self): + return '\n'.join(self.pformat()) + if not six.PY2: + __str__ = __unicode__ + + def __bytes__(self): + return six.text_type(self).encode('utf-8') + if six.PY2: + __str__ = __bytes__ + + @property + def has_mixin_columns(self): + """ + True if table has any mixin columns (defined as columns that are not Column + subclasses). + """ + return any(has_info_class(col, MixinInfo) for col in self.columns.values()) + + def _add_as_mixin_column(self, col): + """ + Determine if ``col`` should be added to the table directly as + a mixin column. + """ + if isinstance(col, BaseColumn): + return False + + # Is it a mixin but not not Quantity (which gets converted to Column with + # unit set). + return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo) + + def pprint(self, max_lines=None, max_width=None, show_name=True, + show_unit=None, show_dtype=False, align=None): + """Print a formatted string representation of the table. + + If no value of ``max_lines`` is supplied then the height of the + screen terminal is used to set ``max_lines``. If the terminal + height cannot be determined then the default is taken from the + configuration item ``astropy.conf.max_lines``. If a negative + value of ``max_lines`` is supplied then there is no line limit + applied. + + The same applies for max_width except the configuration item is + ``astropy.conf.max_width``. + + Parameters + ---------- + max_lines : int + Maximum number of lines in table output. + + max_width : int or `None` + Maximum character width of output. + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is True. + + align : str or list or tuple or `None` + Left/right alignment of columns. Default is right (None) for all + columns. Other allowed values are '>', '<', '^', and '0=' for + right, left, centered, and 0-padded, respectively. A list of + strings can be provided for alignment of tables with multiple + columns. 
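+
+ Examples
+ --------
+ A minimal sketch (hypothetical data; the exact rendering depends on the
+ terminal size and configuration)::
+
+ >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+ >>> t.pprint(max_lines=8, align='<')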
+ """ + lines, outs = self.formatter._pformat_table(self, max_lines, max_width, + show_name=show_name, show_unit=show_unit, + show_dtype=show_dtype, align=align) + if outs['show_length']: + lines.append('Length = {0} rows'.format(len(self))) + + n_header = outs['n_header'] + + for i, line in enumerate(lines): + if i < n_header: + color_print(line, 'red') + else: + print(line) + + def _make_index_row_display_table(self, index_row_name): + if index_row_name not in self.columns: + idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self))) + return self.__class__([idx_col] + self.columns.values(), + copy=False) + else: + return self + + def show_in_notebook(self, tableid=None, css=None, display_length=50, + table_class='astropy-default', show_row_index='idx'): + """Render the table in HTML and show it in the IPython notebook. + + Parameters + ---------- + tableid : str or `None` + An html ID tag for the table. Default is ``table{id}-XXX``, where + id is the unique integer id of the table object, id(self), and XXX + is a random number to avoid conflicts when printing the same table + multiple times. + table_class : str or `None` + A string with a list of HTML classes used to style the table. + The special default string ('astropy-default') means that the string + will be retrieved from the configuration item + ``astropy.table.default_notebook_table_class``. Note that these + table classes may make use of bootstrap, as this is loaded with the + notebook. See `this page `_ + for the list of classes. + css : string + A valid CSS string declaring the formatting for the table. Defaults + to ``astropy.table.jsviewer.DEFAULT_CSS_NB``. + display_length : int, optional + Number or rows to show. Defaults to 50. + show_row_index : str or False + If this does not evaluate to False, a column with the given name + will be added to the version of the table that gets displayed. + This new column shows the index of the row in the table itself, + even when the displayed table is re-sorted by another column. Note + that if a column with this name already exists, this option will be + ignored. Defaults to "idx". + + Notes + ----- + Currently, unlike `show_in_browser` (with ``jsviewer=True``), this + method needs to access online javascript code repositories. This is due + to modern browsers' limitations on accessing local files. Hence, if you + call this method while offline (and don't have a cached version of + jquery and jquery.dataTables), you will not get the jsviewer features. 
+ """ + + from .jsviewer import JSViewer + from IPython.display import HTML + + if tableid is None: + tableid = 'table{0}-{1}'.format(id(self), + np.random.randint(1, 1e6)) + + jsv = JSViewer(display_length=display_length) + if show_row_index: + display_table = self._make_index_row_display_table(show_row_index) + else: + display_table = self + if table_class == 'astropy-default': + table_class = conf.default_notebook_table_class + html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid, + max_lines=-1, show_dtype=False, + tableclass=table_class) + + columns = display_table.columns.values() + sortable_columns = [i for i, col in enumerate(columns) + if col.dtype.kind in 'iufc'] + html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns) + return HTML(html) + + def show_in_browser(self, max_lines=5000, jsviewer=False, + browser='default', jskwargs={'use_local_files': True}, + tableid=None, table_class="display compact", + css=None, show_row_index='idx'): + """Render the table in HTML and show it in a web browser. + + Parameters + ---------- + max_lines : int + Maximum number of rows to export to the table (set low by default + to avoid memory issues, since the browser view requires duplicating + the table in memory). A negative value of ``max_lines`` indicates + no row limit. + jsviewer : bool + If `True`, prepends some javascript headers so that the table is + rendered as a `DataTables `_ data table. + This allows in-browser searching & sorting. + browser : str + Any legal browser name, e.g. ``'firefox'``, ``'chrome'``, + ``'safari'`` (for mac, you may need to use ``'open -a + "/Applications/Google Chrome.app" {}'`` for Chrome). If + ``'default'``, will use the system default browser. + jskwargs : dict + Passed to the `astropy.table.JSViewer` init. Defaults to + ``{'use_local_files': True}`` which means that the JavaScript + libraries will be served from local copies. + tableid : str or `None` + An html ID tag for the table. Default is ``table{id}``, where id + is the unique integer id of the table object, id(self). + table_class : str or `None` + A string with a list of HTML classes used to style the table. + Default is "display compact", and other possible values can be + found in https://www.datatables.net/manual/styling/classes + css : string + A valid CSS string declaring the formatting for the table. Defaults + to ``astropy.table.jsviewer.DEFAULT_CSS``. + show_row_index : str or False + If this does not evaluate to False, a column with the given name + will be added to the version of the table that gets displayed. + This new column shows the index of the row in the table itself, + even when the displayed table is re-sorted by another column. Note + that if a column with this name already exists, this option will be + ignored. Defaults to "idx". + """ + + import os + import webbrowser + import tempfile + from ..extern.six.moves.urllib.parse import urljoin + from ..extern.six.moves.urllib.request import pathname2url + from .jsviewer import DEFAULT_CSS + + if css is None: + css = DEFAULT_CSS + + # We can't use NamedTemporaryFile here because it gets deleted as + # soon as it gets garbage collected. 
+ tmpdir = tempfile.mkdtemp() + path = os.path.join(tmpdir, 'table.html') + + with open(path, 'w') as tmp: + if jsviewer: + if show_row_index: + display_table = self._make_index_row_display_table(show_row_index) + else: + display_table = self + display_table.write(tmp, format='jsviewer', css=css, + max_lines=max_lines, jskwargs=jskwargs, + table_id=tableid, table_class=table_class) + else: + self.write(tmp, format='html') + + try: + br = webbrowser.get(None if browser == 'default' else browser) + except webbrowser.Error: + log.error("Browser '{}' not found.".format(browser)) + else: + br.open(urljoin('file:', pathname2url(path))) + + def pformat(self, max_lines=None, max_width=None, show_name=True, + show_unit=None, show_dtype=False, html=False, tableid=None, + align=None, tableclass=None): + """Return a list of lines for the formatted string representation of + the table. + + If no value of ``max_lines`` is supplied then the height of the + screen terminal is used to set ``max_lines``. If the terminal + height cannot be determined then the default is taken from the + configuration item ``astropy.conf.max_lines``. If a negative + value of ``max_lines`` is supplied then there is no line limit + applied. + + The same applies for ``max_width`` except the configuration item is + ``astropy.conf.max_width``. + + Parameters + ---------- + max_lines : int or `None` + Maximum number of rows to output + + max_width : int or `None` + Maximum character width of output + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is True. + + html : bool + Format the output as an HTML table. Default is False. + + tableid : str or `None` + An ID tag for the table; only used if html is set. Default is + "table{id}", where id is the unique integer id of the table object, + id(self) + + align : str or list or tuple or `None` + Left/right alignment of columns. Default is right (None) for all + columns. Other allowed values are '>', '<', '^', and '0=' for + right, left, centered, and 0-padded, respectively. A list of + strings can be provided for alignment of tables with multiple + columns. + + tableclass : str or list of str or `None` + CSS classes for the table; only used if html is set. Default is + None. + + Returns + ------- + lines : list + Formatted table as a list of strings. + + """ + + lines, outs = self.formatter._pformat_table( + self, max_lines, max_width, show_name=show_name, + show_unit=show_unit, show_dtype=show_dtype, html=html, + tableid=tableid, tableclass=tableclass, align=align) + + if outs['show_length']: + lines.append('Length = {0} rows'.format(len(self))) + + return lines + + def more(self, max_lines=None, max_width=None, show_name=True, + show_unit=None, show_dtype=False): + """Interactively browse table with a paging interface. + + Supported keys:: + + f, : forward one page + b : back one page + r : refresh same page + n : next row + p : previous row + < : go to beginning + > : go to end + q : quit browsing + h : print this help + + Parameters + ---------- + max_lines : int + Maximum number of lines in table output + + max_width : int or `None` + Maximum character width of output + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. 
Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is True. + """ + self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name, + show_unit=show_unit, show_dtype=show_dtype) + + def __getitem__(self, item): + if isinstance(item, six.string_types): + return self.columns[item] + elif isinstance(item, (int, np.integer)): + return self.Row(self, item) + elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'): + return self.Row(self, item.item()) + elif (isinstance(item, (tuple, list)) and item and + all(isinstance(x, six.string_types) for x in item)): + bad_names = [x for x in item if x not in self.colnames] + if bad_names: + raise ValueError('Slice name(s) {0} not valid column name(s)' + .format(', '.join(bad_names))) + out = self.__class__([self[x] for x in item], + meta=deepcopy(self.meta), + copy_indices=self._copy_indices) + out._groups = groups.TableGroups(out, indices=self.groups._indices, + keys=self.groups._keys) + return out + elif ((isinstance(item, np.ndarray) and item.size == 0) or + (isinstance(item, (tuple, list)) and not item)): + # If item is an empty array/list/tuple then return the table with no rows + return self._new_from_slice([]) + elif (isinstance(item, slice) or + isinstance(item, np.ndarray) or + isinstance(item, list) or + isinstance(item, tuple) and all(isinstance(x, np.ndarray) + for x in item)): + # here for the many ways to give a slice; a tuple of ndarray + # is produced by np.where, as in t[np.where(t['a'] > 2)] + # For all, a new table is constructed with slice of all columns + return self._new_from_slice(item) + else: + raise ValueError('Illegal type {0} for table item access' + .format(type(item))) + + def __setitem__(self, item, value): + # If the item is a string then it must be the name of a column. + # If that column doesn't already exist then create it now. + if isinstance(item, six.string_types) and item not in self.colnames: + NewColumn = self.MaskedColumn if self.masked else self.Column + # If value doesn't have a dtype and won't be added as a mixin then + # convert to a numpy array. + if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value): + value = np.asarray(value) + + # Structured ndarray gets viewed as a mixin (unless already a valid + # mixin class). + if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and + not self._add_as_mixin_column(value)): + value = value.view(NdarrayMixin) + + # Make new column and assign the value. If the table currently + # has no rows (len=0) of the value is already a Column then + # define new column directly from value. In the latter case + # this allows for propagation of Column metadata. Otherwise + # define a new column with the right length and shape and then + # set it from value. This allows for broadcasting, e.g. t['a'] + # = 1. + name = item + # If this is a column-like object that could be added directly to table + if isinstance(value, BaseColumn) or self._add_as_mixin_column(value): + # If we're setting a new column to a scalar, broadcast it. 
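+ # For example (illustrative): assigning a length-1 Column to a new name
+ # on a three-row table, ``t['c'] = Column([7])``, broadcasts it to
+ # length 3.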
+ # (things will fail in _init_from_cols if this doesn't work) + if (len(self) > 0 and (getattr(value, 'isscalar', False) or + getattr(value, 'shape', None) == () or + len(value) == 1)): + new_shape = (len(self),) + getattr(value, 'shape', ())[1:] + if isinstance(value, np.ndarray): + value = np_broadcast_to(value, shape=new_shape, + subok=True) + elif isinstance(value, ShapedLikeNDArray): + value = value._apply(np_broadcast_to, shape=new_shape, + subok=True) + + new_column = col_copy(value) + new_column.info.name = name + + elif len(self) == 0: + new_column = NewColumn(value, name=name) + else: + new_column = NewColumn(name=name, length=len(self), dtype=value.dtype, + shape=value.shape[1:], + unit=getattr(value, 'unit', None)) + new_column[:] = value + + # Now add new column to the table + self.add_columns([new_column], copy=False) + + else: + n_cols = len(self.columns) + + if isinstance(item, six.string_types): + # Set an existing column by first trying to replace, and if + # this fails do an in-place update. See definition of mask + # property for discussion of the _setitem_inplace attribute. + if (not getattr(self, '_setitem_inplace', False) + and not conf.replace_inplace): + try: + self._replace_column_warnings(item, value) + return + except Exception: + pass + self.columns[item][:] = value + + elif isinstance(item, (int, np.integer)): + # Set the corresponding row assuming value is an iterable. + if not hasattr(value, '__len__'): + raise TypeError('Right side value must be iterable') + + if len(value) != n_cols: + raise ValueError('Right side value needs {0} elements (one for each column)' + .format(n_cols)) + + for col, val in zip(self.columns.values(), value): + col[item] = val + + elif (isinstance(item, slice) or + isinstance(item, np.ndarray) or + isinstance(item, list) or + (isinstance(item, tuple) and # output from np.where + all(isinstance(x, np.ndarray) for x in item))): + + if isinstance(value, Table): + vals = (col for col in value.columns.values()) + + elif isinstance(value, np.ndarray) and value.dtype.names: + vals = (value[name] for name in value.dtype.names) + + elif np.isscalar(value): + import itertools + vals = itertools.repeat(value, n_cols) + + else: # Assume this is an iterable that will work + if len(value) != n_cols: + raise ValueError('Right side value needs {0} elements (one for each column)' + .format(n_cols)) + vals = value + + for col, val in zip(self.columns.values(), vals): + col[item] = val + + else: + raise ValueError('Illegal type {0} for table item access' + .format(type(item))) + + def __delitem__(self, item): + if isinstance(item, six.string_types): + self.remove_column(item) + elif isinstance(item, tuple): + self.remove_columns(item) + + def field(self, item): + """Return column[item] for recarray compatibility.""" + return self.columns[item] + + @property + def masked(self): + return self._masked + + @masked.setter + def masked(self, masked): + raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)' + ' to convert to a masked table)') + + def _set_masked(self, masked): + """ + Set the table masked property. + + Parameters + ---------- + masked : bool + State of table masking (`True` or `False`) + """ + if hasattr(self, '_masked'): + # The only allowed change is from None to False or True, or False to True + if self._masked is None and masked in [False, True]: + self._masked = masked + elif self._masked is False and masked is True: + log.info("Upgrading Table to masked Table. 
Use Table.filled() to convert to unmasked table.") + self._masked = masked + elif self._masked is masked: + raise Exception("Masked attribute is already set to {0}".format(masked)) + else: + raise Exception("Cannot change masked attribute to {0} once it is set to {1}" + .format(masked, self._masked)) + else: + if masked in [True, False, None]: + self._masked = masked + else: + raise ValueError("masked should be one of True, False, None") + if self._masked: + self._column_class = self.MaskedColumn + else: + self._column_class = self.Column + + @property + def ColumnClass(self): + if self._column_class is None: + return self.Column + else: + return self._column_class + + @property + def dtype(self): + return np.dtype([descr(col) for col in self.columns.values()]) + + @property + def colnames(self): + return list(self.columns.keys()) + + def keys(self): + return list(self.columns.keys()) + + def __len__(self): + if len(self.columns) == 0: + return 0 + + lengths = set(len(col) for col in self.columns.values()) + if len(lengths) != 1: + len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()] + raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs))) + + return lengths.pop() + + def index_column(self, name): + """ + Return the positional index of column ``name``. + + Parameters + ---------- + name : str + column name + + Returns + ------- + index : int + Positional index of column ``name``. + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Get index of column 'b' of the table:: + + >>> t.index_column('b') + 1 + """ + try: + return self.colnames.index(name) + except ValueError: + raise ValueError("Column {0} does not exist".format(name)) + + def add_column(self, col, index=None, name=None, rename_duplicate=False): + """ + Add a new Column object ``col`` to the table. If ``index`` + is supplied then insert column before ``index`` position + in the list of columns, otherwise append column to the end + of the list. + + Parameters + ---------- + col : Column + Column object to add. + index : int or `None` + Insert column before this position or at end (default). + name : str + Column name + rename_duplicate : bool + Uniquify column name if it already exist. Default is False. + + Examples + -------- + Create a table with two columns 'a' and 'b':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> print(t) + a b + --- --- + 1 0.1 + 2 0.2 + 3 0.3 + + Create a third column 'c' and append it to the end of the table:: + + >>> col_c = Column(name='c', data=['x', 'y', 'z']) + >>> t.add_column(col_c) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Add column 'd' at position 1. Note that the column is inserted + before the given index:: + + >>> col_d = Column(name='d', data=['a', 'b', 'c']) + >>> t.add_column(col_d, 1) + >>> print(t) + a d b c + --- --- --- --- + 1 a 0.1 x + 2 b 0.2 y + 3 c 0.3 z + + Add second column named 'b' with rename_duplicate:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> col_b = Column(name='b', data=[1.1, 1.2, 1.3]) + >>> t.add_column(col_b, rename_duplicate=True) + >>> print(t) + a b b_1 + --- --- --- + 1 0.1 1.1 + 2 0.2 1.2 + 3 0.3 1.3 + + Add an unnamed column or mixin object in the table using a default name + or by specifying an explicit name with ``name``. 
Name can also be overridden:: + + >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) + >>> col_c = Column(data=['x', 'y']) + >>> t.add_column(col_c) + >>> t.add_column(col_c, name='c') + >>> col_b = Column(name='b', data=[1.1, 1.2]) + >>> t.add_column(col_b, name='d') + >>> print(t) + a b col2 c d + --- --- ---- --- --- + 1 0.1 x x 1.1 + 2 0.2 y y 1.2 + + To add several columns use add_columns. + """ + if index is None: + index = len(self.columns) + if name is not None: + name = (name,) + + self.add_columns([col], [index], name, rename_duplicate=rename_duplicate) + + def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False): + """ + Add a list of new Column objects ``cols`` to the table. If a + corresponding list of ``indexes`` is supplied then insert column + before each ``index`` position in the *original* list of columns, + otherwise append columns to the end of the list. + + Parameters + ---------- + cols : list of Columns + Column objects to add. + indexes : list of ints or `None` + Insert column before this position or at end (default). + names : list of str + Column names + copy : bool + Make a copy of the new columns. Default is True. + rename_duplicate : bool + Uniquify new column names if they duplicate the existing ones. + Default is False. + + + Examples + -------- + Create a table with two columns 'a' and 'b':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> print(t) + a b + --- --- + 1 0.1 + 2 0.2 + 3 0.3 + + Create column 'c' and 'd' and append them to the end of the table:: + + >>> col_c = Column(name='c', data=['x', 'y', 'z']) + >>> col_d = Column(name='d', data=['u', 'v', 'w']) + >>> t.add_columns([col_c, col_d]) + >>> print(t) + a b c d + --- --- --- --- + 1 0.1 x u + 2 0.2 y v + 3 0.3 z w + + Add column 'c' at position 0 and column 'd' at position 1. Note that + the columns are inserted before the given position:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> col_c = Column(name='c', data=['x', 'y', 'z']) + >>> col_d = Column(name='d', data=['u', 'v', 'w']) + >>> t.add_columns([col_c, col_d], [0, 1]) + >>> print(t) + c a d b + --- --- --- --- + x 1 u 0.1 + y 2 v 0.2 + z 3 w 0.3 + + Add second column 'b' and column 'c' with ``rename_duplicate``:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> col_b = Column(name='b', data=[1.1, 1.2, 1.3]) + >>> col_c = Column(name='c', data=['x', 'y', 'z']) + >>> t.add_columns([col_b, col_c], rename_duplicate=True) + >>> print(t) + a b b_1 c + --- --- --- --- + 1 0.1 1.1 x + 2 0.2 1.2 y + 3 0.3 1.3 z + + Add unnamed columns or mixin objects in the table using default names + or by specifying explicit names with ``names``. 
Names can also be overridden:: + + >>> t = Table() + >>> col_a = Column(data=['x', 'y']) + >>> col_b = Column(name='b', data=['u', 'v']) + >>> t.add_columns([col_a, col_b]) + >>> t.add_columns([col_a, col_b], names=['c', 'd']) + >>> print(t) + col0 b c d + ---- --- --- --- + x u x u + y v y v + """ + if indexes is None: + indexes = [len(self.columns)] * len(cols) + elif len(indexes) != len(cols): + raise ValueError('Number of indexes must match number of cols') + + if copy: + cols = [col_copy(col) for col in cols] + + if len(self.columns) == 0: + # No existing table data, init from cols + newcols = cols + else: + newcols = list(self.columns.values()) + new_indexes = list(range(len(newcols) + 1)) + for col, index in zip(cols, indexes): + i = new_indexes.index(index) + new_indexes.insert(i, None) + newcols.insert(i, col) + + if names is None: + names = (None,) * len(cols) + elif len(names) != len(cols): + raise ValueError('Number of names must match number of cols') + + for i, (col, name) in enumerate(zip(cols, names)): + if name is None: + if col.info.name is not None: + continue + name = 'col{}'.format(i + len(self.columns)) + if col.info.parent_table is not None: + col = col_copy(col) + col.info.name = name + + if rename_duplicate: + existing_names = set(self.colnames) + for col in cols: + i = 1 + orig_name = col.info.name + if col.info.name in existing_names: + # If the column belongs to another table then copy it + # before renaming + while col.info.name in existing_names: + # Iterate until a unique name is found + if col.info.parent_table is not None: + col = col_copy(col) + new_name = '{0}_{1}'.format(orig_name, i) + col.info.name = new_name + i += 1 + existing_names.add(new_name) + + self._init_from_cols(newcols) + + def _replace_column_warnings(self, name, col): + """ + Same as replace_column but issues warnings under various circumstances. + """ + warns = conf.replace_warnings + + if 'refcount' in warns and name in self.colnames: + refcount = sys.getrefcount(self[name]) + + if name in self.colnames: + old_col = self[name] + + # This may raise an exception (e.g. t['a'] = 1) in which case none of + # the downstream code runs. + self.replace_column(name, col) + + if 'always' in warns: + warnings.warn("replaced column '{}'".format(name), + TableReplaceWarning, stacklevel=3) + + if 'slice' in warns: + try: + # Check for ndarray-subclass slice. An unsliced instance + # has an ndarray for the base while sliced has the same class + # as parent. + if isinstance(old_col.base, old_col.__class__): + msg = ("replaced column '{}' which looks like an array slice. " + "The new column no longer shares memory with the " + "original array.".format(name)) + warnings.warn(msg, TableReplaceWarning, stacklevel=3) + except AttributeError: + pass + + if 'refcount' in warns: + # Did reference count change? + new_refcount = sys.getrefcount(self[name]) + if refcount != new_refcount: + msg = ("replaced column '{}' and the number of references " + "to the column changed.".format(name)) + warnings.warn(msg, TableReplaceWarning, stacklevel=3) + + if 'attributes' in warns: + # Any of the standard column attributes changed? + changed_attrs = [] + new_col = self[name] + # Check base DataInfo attributes that any column will have + for attr in DataInfo.attr_names: + if getattr(old_col.info, attr) != getattr(new_col.info, attr): + changed_attrs.append(attr) + + if changed_attrs: + msg = ("replaced column '{}' and column attributes {} changed." 
+ .format(name, changed_attrs)) + warnings.warn(msg, TableReplaceWarning, stacklevel=3) + + def replace_column(self, name, col): + """ + Replace column ``name`` with the new ``col`` object. + + Parameters + ---------- + name : str + Name of column to replace + col : column object (list, ndarray, Column, etc) + New column object to replace the existing column + + Examples + -------- + Replace column 'a' with a float version of itself:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> float_a = t['a'].astype(float) + >>> t.replace_column('a', float_a) + """ + if name not in self.colnames: + raise ValueError('column name {0} is not in the table'.format(name)) + + if self[name].info.indices: + raise ValueError('cannot replace a table index column') + + t = self.__class__([col], names=[name]) + cols = OrderedDict(self.columns) + cols[name] = t[name] + self._init_from_cols(cols.values()) + + def remove_row(self, index): + """ + Remove a row from the table. + + Parameters + ---------- + index : int + Index of row to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove row 1 from the table:: + + >>> t.remove_row(1) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 3 0.3 z + + To remove several rows at the same time use remove_rows. + """ + # check the index against the types that work with np.delete + if not isinstance(index, (six.integer_types, np.integer)): + raise TypeError("Row index must be an integer") + self.remove_rows(index) + + def remove_rows(self, row_specifier): + """ + Remove rows from the table. + + Parameters + ---------- + row_specifier : slice, int, or array of ints + Specification for rows to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove rows 0 and 2 from the table:: + + >>> t.remove_rows([0, 2]) + >>> print(t) + a b c + --- --- --- + 2 0.2 y + + + Note that there are no warnings if the slice operator extends + outside the data:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> t.remove_rows(slice(10, 20, 1)) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + """ + # Update indices + for index in self.indices: + index.remove_rows(row_specifier) + + keep_mask = np.ones(len(self), dtype=np.bool) + keep_mask[row_specifier] = False + + columns = self.TableColumns() + for name, col in self.columns.items(): + newcol = col[keep_mask] + newcol.info.parent_table = self + columns[name] = newcol + + self._replace_cols(columns) + + # Revert groups to default (ungrouped) state + if hasattr(self, '_groups'): + del self._groups + + def remove_column(self, name): + """ + Remove a column from the table. + + This can also be done with:: + + del table[name] + + Parameters + ---------- + name : str + Name of column to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... 
names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove column 'b' from the table:: + + >>> t.remove_column('b') + >>> print(t) + a c + --- --- + 1 x + 2 y + 3 z + + To remove several columns at the same time use remove_columns. + """ + + self.remove_columns([name]) + + def remove_columns(self, names): + ''' + Remove several columns from the table. + + Parameters + ---------- + names : list + A list containing the names of the columns to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove columns 'b' and 'c' from the table:: + + >>> t.remove_columns(['b', 'c']) + >>> print(t) + a + --- + 1 + 2 + 3 + + Specifying only a single column also works. Remove column 'b' from the table:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> t.remove_columns('b') + >>> print(t) + a c + --- --- + 1 x + 2 y + 3 z + + This gives the same as using remove_column. + ''' + if isinstance(names, six.string_types): + names = [names] + + for name in names: + if name not in self.columns: + raise KeyError("Column {0} does not exist".format(name)) + + for name in names: + self.columns.pop(name) + + def _convert_string_dtype(self, in_kind, out_kind, python3_only): + """ + Convert string-like columns to/from bytestring and unicode (internal only). + + Parameters + ---------- + in_kind : str + Input dtype.kind + out_kind : str + Output dtype.kind + python3_only : bool + Only do this operation for Python 3 + """ + if python3_only and six.PY2: + return + + # If there are no `in_kind` columns then do nothing + cols = self.columns.values() + if not any(col.dtype.kind == in_kind for col in cols): + return + + newcols = [] + for col in cols: + if col.dtype.kind == in_kind: + newdtype = re.sub(in_kind, out_kind, col.dtype.str) + newcol = col.__class__(col, dtype=newdtype) + else: + newcol = col + newcols.append(newcol) + + self._init_from_cols(newcols) + + def convert_bytestring_to_unicode(self, python3_only=False): + """ + Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming + ASCII encoding. + + Internally this changes string columns to represent each character in the string + with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows Python + 3 scripts to manipulate string arrays with natural syntax. + + The ``python3_only`` parameter is provided as a convenience so that code can + be written in a Python 2 / 3 compatible way:: + + >>> t = Table.read('my_data.fits') + >>> t.convert_bytestring_to_unicode(python3_only=True) + + Parameters + ---------- + python3_only : bool + Only do this operation for Python 3 + """ + self._convert_string_dtype('S', 'U', python3_only) + + def convert_unicode_to_bytestring(self, python3_only=False): + """ + Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S'). + + When exporting a unicode string array to a file in Python 3, it may be desirable + to encode unicode columns as bytestrings. This routine takes advantage of numpy + automated conversion which works for strings that are pure ASCII. 
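+
+ For example, a column of dtype ``<U5`` is re-created with dtype ``<S5``;
+ the column name and string contents are unchanged.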
+ + The ``python3_only`` parameter is provided as a convenience so that code can + be written in a Python 2 / 3 compatible way:: + + >>> t.convert_unicode_to_bytestring(python3_only=True) + >>> t.write('my_data.fits') + + Parameters + ---------- + python3_only : bool + Only do this operation for Python 3 + """ + self._convert_string_dtype('U', 'S', python3_only) + + def keep_columns(self, names): + ''' + Keep only the columns specified (remove the others). + + Parameters + ---------- + names : list + A list containing the names of the columns to keep. All other + columns will be removed. + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Specifying only a single column name keeps only this column. + Keep only column 'a' of the table:: + + >>> t.keep_columns('a') + >>> print(t) + a + --- + 1 + 2 + 3 + + Specifying a list of column names is keeps is also possible. + Keep columns 'a' and 'c' of the table:: + + >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> t.keep_columns(['a', 'c']) + >>> print(t) + a c + --- --- + 1 x + 2 y + 3 z + ''' + + if isinstance(names, six.string_types): + names = [names] + + for name in names: + if name not in self.columns: + raise KeyError("Column {0} does not exist".format(name)) + + remove = list(set(self.keys()) - set(names)) + + self.remove_columns(remove) + + def rename_column(self, name, new_name): + ''' + Rename a column. + + This can also be done directly with by setting the ``name`` attribute + for a column:: + + table[name].name = new_name + + TODO: this won't work for mixins + + Parameters + ---------- + name : str + The current name of the column. + new_name : str + The new name for the column + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) + >>> print(t) + a b c + --- --- --- + 1 3 5 + 2 4 6 + + Renaming column 'a' to 'aa':: + + >>> t.rename_column('a' , 'aa') + >>> print(t) + aa b c + --- --- --- + 1 3 5 + 2 4 6 + ''' + + if name not in self.keys(): + raise KeyError("Column {0} does not exist".format(name)) + + self.columns[name].info.name = new_name + + def add_row(self, vals=None, mask=None): + """Add a new row to the end of the table. + + The ``vals`` argument can be: + + sequence (e.g. tuple or list) + Column values in the same order as table columns. + mapping (e.g. dict) + Keys corresponding to column names. Missing values will be + filled with np.zeros for the column dtype. + `None` + All values filled with np.zeros for the column dtype. + + This method requires that the Table object "owns" the underlying array + data. In particular one cannot add a row to a Table that was + initialized with copy=False from an existing array. + + The ``mask`` attribute should give (if desired) the mask for the + values. The type of the mask should match that of the values, i.e. if + ``vals`` is an iterable, then ``mask`` should also be an iterable + with the same length, and if ``vals`` is a mapping, then ``mask`` + should be a dictionary. 
+ + Parameters + ---------- + vals : tuple, list, dict or `None` + Use the specified values in the new row + mask : tuple, list, dict or `None` + Use the specified mask values in the new row + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) + >>> print(t) + a b c + --- --- --- + 1 4 7 + 2 5 8 + + Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c':: + + >>> t.add_row([3,6,9]) + >>> print(t) + a b c + --- --- --- + 1 4 7 + 2 5 8 + 3 6 9 + """ + self.insert_row(len(self), vals, mask) + + def insert_row(self, index, vals=None, mask=None): + """Add a new row before the given ``index`` position in the table. + + The ``vals`` argument can be: + + sequence (e.g. tuple or list) + Column values in the same order as table columns. + mapping (e.g. dict) + Keys corresponding to column names. Missing values will be + filled with np.zeros for the column dtype. + `None` + All values filled with np.zeros for the column dtype. + + The ``mask`` attribute should give (if desired) the mask for the + values. The type of the mask should match that of the values, i.e. if + ``vals`` is an iterable, then ``mask`` should also be an iterable + with the same length, and if ``vals`` is a mapping, then ``mask`` + should be a dictionary. + + Parameters + ---------- + vals : tuple, list, dict or `None` + Use the specified values in the new row + mask : tuple, list, dict or `None` + Use the specified mask values in the new row + """ + colnames = self.colnames + + N = len(self) + if index < -N or index > N: + raise IndexError("Index {0} is out of bounds for table with length {1}" + .format(index, N)) + if index < 0: + index += N + + def _is_mapping(obj): + """Minimal checker for mapping (dict-like) interface for obj""" + attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items') + return all(hasattr(obj, attr) for attr in attrs) + + if mask is not None and not self.masked: + # Possibly issue upgrade warning and update self.ColumnClass. This + # does not change the existing columns. + self._set_masked(True) + + if _is_mapping(vals) or vals is None: + # From the vals and/or mask mappings create the corresponding lists + # that have entries for each table column. + if mask is not None and not _is_mapping(mask): + raise TypeError("Mismatch between type of vals and mask") + + # Now check that the mask is specified for the same keys as the + # values, otherwise things get really confusing. + if mask is not None and set(vals.keys()) != set(mask.keys()): + raise ValueError('keys in mask should match keys in vals') + + if vals and any(name not in colnames for name in vals): + raise ValueError('Keys in vals must all be valid column names') + + vals_list = [] + mask_list = [] + + for name in colnames: + if vals and name in vals: + vals_list.append(vals[name]) + mask_list.append(False if mask is None else mask[name]) + else: + col = self[name] + if hasattr(col, 'dtype'): + # Make a placeholder zero element of the right type which is masked. + # This assumes the appropriate insert() method will broadcast a + # numpy scalar to the right shape. + vals_list.append(np.zeros(shape=(), dtype=col.dtype)) + + # For masked table any unsupplied values are masked by default. 
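+ # e.g. (illustrative): on a masked table with columns 'a' and 'b',
+ # ``t.add_row({'a': 1})`` gives 'b' a zero placeholder that is masked
+ # in the new row.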
+ mask_list.append(self.masked and vals is not None) + else: + raise ValueError("Value must be supplied for column '{0}'".format(name)) + + vals = vals_list + mask = mask_list + + if isiterable(vals): + if mask is not None and (not isiterable(mask) or _is_mapping(mask)): + raise TypeError("Mismatch between type of vals and mask") + + if len(self.columns) != len(vals): + raise ValueError('Mismatch between number of vals and columns') + + if mask is not None: + if len(self.columns) != len(mask): + raise ValueError('Mismatch between number of masks and columns') + else: + mask = [False] * len(self.columns) + + else: + raise TypeError('Vals must be an iterable or mapping or None') + + columns = self.TableColumns() + try: + # Insert val at index for each column + for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask): + # If the new row caused a change in self.ColumnClass then + # Column-based classes need to be converted first. This is + # typical for adding a row with mask values to an unmasked table. + if isinstance(col, Column) and not isinstance(col, self.ColumnClass): + col = self.ColumnClass(col, copy=False) + + newcol = col.insert(index, val, axis=0) + if not isinstance(newcol, BaseColumn): + newcol.info.name = name + if self.masked: + newcol.mask = FalseArray(newcol.shape) + + if len(newcol) != N + 1: + raise ValueError('Incorrect length for column {0} after inserting {1}' + ' (expected {2}, got {3})' + .format(name, val, len(newcol), N + 1)) + newcol.info.parent_table = self + + # Set mask if needed + if self.masked: + newcol.mask[index] = mask_ + + columns[name] = newcol + + # insert row in indices + for table_index in self.indices: + table_index.insert_row(index, vals, self.columns.values()) + + except Exception as err: + raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}" + .format(name, err)) + else: + self._replace_cols(columns) + + # Revert groups to default (ungrouped) state + if hasattr(self, '_groups'): + del self._groups + + def _replace_cols(self, columns): + for col, new_col in zip(self.columns.values(), columns.values()): + new_col.info.indices = [] + for index in col.info.indices: + index.columns[index.col_position(col.info.name)] = new_col + new_col.info.indices.append(index) + + self.columns = columns + + def argsort(self, keys=None, kind=None): + """ + Return the indices which would sort the table according to one or + more key columns. This simply calls the `numpy.argsort` function on + the table with the ``order`` parameter set to ``keys``. + + Parameters + ---------- + keys : str or list of str + The column name(s) to order the table by + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. + + Returns + ------- + index_array : ndarray, int + Array of indices that sorts the table by the specified key + column(s). + """ + if isinstance(keys, six.string_types): + keys = [keys] + + # use index sorted order if possible + if keys is not None: + index = get_index(self, self[keys]) + if index is not None: + return index.sorted_data() + + kwargs = {} + if keys: + kwargs['order'] = keys + if kind: + kwargs['kind'] = kind + + if keys: + data = self[keys].as_array() + else: + data = self.as_array() + + return data.argsort(**kwargs) + + def sort(self, keys=None): + ''' + Sort the table according to one or more keys. This operates + on the existing table and does not return a new table. + + Parameters + ---------- + keys : str or list of str + The key(s) to order the table by. 
If None, use the + primary index of the Table. + + Examples + -------- + Create a table with 3 columns:: + + >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], + ... [12,15,18]], names=('firstname','name','tel')) + >>> print(t) + firstname name tel + --------- ------- --- + Max Miller 12 + Jo Miller 15 + John Jackson 18 + + Sorting according to standard sorting rules, first 'name' then 'firstname':: + + >>> t.sort(['name','firstname']) + >>> print(t) + firstname name tel + --------- ------- --- + John Jackson 18 + Jo Miller 15 + Max Miller 12 + ''' + if keys is None: + if not self.indices: + raise ValueError("Table sort requires input keys or a table index") + keys = [x.info.name for x in self.indices[0].columns] + + if isinstance(keys, six.string_types): + keys = [keys] + + indexes = self.argsort(keys) + sort_index = get_index(self, self[keys]) + if sort_index is not None: + # avoid inefficient relabelling of sorted index + prev_frozen = sort_index._frozen + sort_index._frozen = True + + for col in self.columns.values(): + col[:] = col.take(indexes, axis=0) + + if sort_index is not None: + # undo index freeze + sort_index._frozen = prev_frozen + # now relabel the sort index appropriately + sort_index.sort() + + def reverse(self): + ''' + Reverse the row order of table rows. The table is reversed + in place and there are no function arguments. + + Examples + -------- + Create a table with three columns:: + + >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], + ... [12,15,18]], names=('firstname','name','tel')) + >>> print(t) + firstname name tel + --------- ------- --- + Max Miller 12 + Jo Miller 15 + John Jackson 18 + + Reversing order:: + + >>> t.reverse() + >>> print(t) + firstname name tel + --------- ------- --- + John Jackson 18 + Jo Miller 15 + Max Miller 12 + ''' + for col in self.columns.values(): + col[:] = col[::-1] + for index in self.indices: + index.reverse() + + @classmethod + def read(cls, *args, **kwargs): + """ + Read and parse a data table and return as a Table. + + This function provides the Table interface to the astropy unified I/O + layer. This allows easily reading a file in many supported data formats + using syntax such as:: + + >>> from astropy.table import Table + >>> dat = Table.read('table.dat', format='ascii') + >>> events = Table.read('events.fits', format='fits') + + The arguments and keywords (other than ``format``) provided to this function are + passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`). + """ + out = io_registry.read(cls, *args, **kwargs) + # For some readers (e.g., ascii.ecsv), the returned `out` class is not + # guaranteed to be the same as the desired output `cls`. If so, + # try coercing to desired class without copying (io.registry.read + # would normally do a copy). The normal case here is swapping + # Table <=> QTable. + if cls is not out.__class__: + try: + out = cls(out, copy=False) + except Exception: + raise TypeError('could not convert reader output to {0} ' + 'class.'.format(cls.__name__)) + return out + + def write(self, *args, **kwargs): + """ + Write this Table object out in the specified format. + + This function provides the Table interface to the astropy unified I/O + layer. 
This allows easily writing a file in many supported data formats + using syntax such as:: + + >>> from astropy.table import Table + >>> dat = Table([[1, 2], [3, 4]], names=('a', 'b')) + >>> dat.write('table.dat', format='ascii') + + The arguments and keywords (other than ``format``) provided to this function are + passed through to the underlying data reader (e.g. `~astropy.io.ascii.write`). + """ + io_registry.write(self, *args, **kwargs) + + def copy(self, copy_data=True): + ''' + Return a copy of the table. + + Parameters + ---------- + copy_data : bool + If `True` (the default), copy the underlying data array. + Otherwise, use the same data array. The ``meta`` is always + deepcopied regardless of the value for ``copy_data``. + ''' + out = self.__class__(self, copy=copy_data) + + # If the current table is grouped then do the same in the copy + if hasattr(self, '_groups'): + out._groups = groups.TableGroups(out, indices=self._groups._indices, + keys=self._groups._keys) + return out + + def __deepcopy__(self, memo=None): + return self.copy(True) + + def __copy__(self): + return self.copy(False) + + def __lt__(self, other): + if six.PY2: + raise TypeError("unorderable types: Table() < {0}". + format(str(type(other)))) + else: + return super(Table, self).__lt__(other) + + def __gt__(self, other): + if six.PY2: + raise TypeError("unorderable types: Table() > {0}". + format(str(type(other)))) + else: + return super(Table, self).__gt__(other) + + def __le__(self, other): + if six.PY2: + raise TypeError("unorderable types: Table() <= {0}". + format(str(type(other)))) + else: + return super(Table, self).__le__(other) + + def __ge__(self, other): + if six.PY2: + raise TypeError("unorderable types: Table() >= {0}". + format(str(type(other)))) + else: + return super(Table, self).__ge__(other) + + def __eq__(self, other): + + if isinstance(other, Table): + other = other.as_array() + + if self.masked: + if isinstance(other, np.ma.MaskedArray): + result = self.as_array() == other + else: + # If mask is True, then by definition the row doesn't match + # because the other array is not masked. + false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names]) + result = (self.as_array().data == other) & (self.mask == false_mask) + else: + if isinstance(other, np.ma.MaskedArray): + # If mask is True, then by definition the row doesn't match + # because the other array is not masked. + false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names]) + result = (self.as_array() == other.data) & (other.mask == false_mask) + else: + result = self.as_array() == other + + return result + + def __ne__(self, other): + return ~self.__eq__(other) + + @property + def groups(self): + if not hasattr(self, '_groups'): + self._groups = groups.TableGroups(self) + return self._groups + + def group_by(self, keys): + """ + Group this table by the specified ``keys`` + + This effectively splits the table into groups which correspond to + unique values of the ``keys`` grouping object. The output is a new + `TableGroups` which contains a copy of this table but sorted by row + according to ``keys``. 
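+
+        A minimal sketch of grouping by a single column name (illustrative
+        example only; the column names ``id`` and ``tag`` are arbitrary)::
+
+            >>> from astropy.table import Table
+            >>> t = Table([[1, 1, 2], ['a', 'b', 'a']], names=('id', 'tag'))
+            >>> tg = t.group_by('id')
+            >>> len(tg.groups)
+            2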
+ + The ``keys`` input to `group_by` can be specified in different ways: + + - String or list of strings corresponding to table column name(s) + - Numpy array (homogeneous or structured) with same length as this table + - `Table` with same length as this table + + Parameters + ---------- + keys : str, list of str, numpy array, or `Table` + Key grouping object + + Returns + ------- + out : `Table` + New table with groups set + """ + if self.has_mixin_columns: + raise NotImplementedError('group_by not available for tables with mixin columns') + + return groups.table_group_by(self, keys) + + def to_pandas(self): + """ + Return a :class:`pandas.DataFrame` instance + + Returns + ------- + dataframe : :class:`pandas.DataFrame` + A pandas :class:`pandas.DataFrame` instance + + Raises + ------ + ImportError + If pandas is not installed + ValueError + If the Table contains mixin or multi-dimensional columns + """ + from pandas import DataFrame + + if self.has_mixin_columns: + raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame") + + if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()): + raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame") + + out = OrderedDict() + + for name, column in self.columns.items(): + if isinstance(column, MaskedColumn) and np.any(column.mask): + if column.dtype.kind in ['i', 'u']: + out[name] = column.astype(float).filled(np.nan) + warnings.warn( + "converted column '{}' from integer to float".format( + name), TableReplaceWarning, stacklevel=3) + elif column.dtype.kind in ['f', 'c']: + out[name] = column.filled(np.nan) + else: + out[name] = column.astype(np.object).filled(np.nan) + else: + out[name] = column + + if out[name].dtype.byteorder not in ('=', '|'): + out[name] = out[name].byteswap().newbyteorder() + + return DataFrame(out) + + @classmethod + def from_pandas(cls, dataframe): + """ + Create a `Table` from a :class:`pandas.DataFrame` instance + + Parameters + ---------- + dataframe : :class:`pandas.DataFrame` + The pandas :class:`pandas.DataFrame` instance + + Returns + ------- + table : `Table` + A `Table` (or subclass) instance + """ + + out = OrderedDict() + + for name in dataframe.columns: + column = dataframe[name] + mask = np.array(column.isnull()) + data = np.array(column) + + if data.dtype.kind == 'O': + # If all elements of an object array are string-like or np.nan + # then coerce back to a native numpy str/unicode array. + string_types = six.string_types + if not six.PY2: + string_types += (bytes,) + nan = np.nan + if all(isinstance(x, string_types) or x is nan for x in data): + # Force any missing (null) values to b''. Numpy will + # upcast to str/unicode as needed. + data[mask] = b'' + + # When the numpy object array is represented as a list then + # numpy initializes to the correct string or unicode type. + data = np.array([x for x in data]) + + if np.any(mask): + out[name] = MaskedColumn(data=data, name=name, mask=mask) + else: + out[name] = Column(data=data, name=name) + + return cls(out) + + info = TableInfo() + + +class QTable(Table): + """A class to represent tables of heterogeneous data. + + `QTable` provides a class for heterogeneous tabular data which can be + easily modified, for instance adding columns or new rows. + + The `QTable` class is identical to `Table` except that columns with an + associated ``unit`` attribute are converted to `~astropy.units.Quantity` + objects. 
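+
+    A minimal sketch of this behaviour (illustrative only; the column name
+    ``v`` is arbitrary)::
+
+        >>> from astropy.table import QTable
+        >>> import astropy.units as u
+        >>> t = QTable()
+        >>> t['v'] = [1.0, 2.0] * u.km / u.s
+        >>> isinstance(t['v'], u.Quantity)
+        True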
+ + Parameters + ---------- + data : numpy ndarray, dict, list, Table, or table-like object, optional + Data to initialize table. + masked : bool, optional + Specify whether the table is masked. + names : list, optional + Specify column names. + dtype : list, optional + Specify column data types. + meta : dict, optional + Metadata associated with the table. + copy : bool, optional + Copy the input data. Default is True. + rows : numpy ndarray, list of lists, optional + Row-oriented data for table instead of ``data`` argument. + copy_indices : bool, optional + Copy any indices in the input data. Default is True. + **kwargs : dict, optional + Additional keyword args when converting table-like object. + + """ + + def _add_as_mixin_column(self, col): + """ + Determine if ``col`` should be added to the table directly as + a mixin column. + """ + return has_info_class(col, MixinInfo) + + def _convert_col_for_table(self, col): + if (isinstance(col, Column) and getattr(col, 'unit', None) is not None): + # We need to turn the column into a quantity, or a subclass + # identified in the unit (such as u.mag()). + q_cls = getattr(col.unit, '_quantity_class', Quantity) + qcol = q_cls(col.data, col.unit, copy=False) + qcol.info = col.info + col = qcol + else: + col = super(QTable, self)._convert_col_for_table(col) + + return col + + +class NdarrayMixin(np.ndarray): + """ + Mixin column class to allow storage of arbitrary numpy + ndarrays within a Table. This is a subclass of numpy.ndarray + and has the same initialization options as ndarray(). + """ + info = ParentDtypeInfo() + + def __new__(cls, obj, *args, **kwargs): + self = np.array(obj, *args, **kwargs).view(cls) + if 'info' in getattr(obj, '__dict__', ()): + self.info = obj.info + return self + + def __array_finalize__(self, obj): + if obj is None: + return + + if six.callable(super(NdarrayMixin, self).__array_finalize__): + super(NdarrayMixin, self).__array_finalize__(obj) + + # Self was created from template (e.g. obj[slice] or (obj * 2)) + # or viewcast e.g. obj.view(Column). In either case we want to + # init Column attributes for self from obj if possible. 
+ if 'info' in getattr(obj, '__dict__', ()): + self.info = obj.info + + def __reduce__(self): + # patch to pickle Quantity objects (ndarray subclasses), see + # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html + + object_state = list(super(NdarrayMixin, self).__reduce__()) + object_state[2] = (object_state[2], self.__dict__) + return tuple(object_state) + + def __setstate__(self, state): + # patch to unpickle NdarrayMixin objects (ndarray subclasses), see + # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html + + nd_state, own_state = state + super(NdarrayMixin, self).__setstate__(nd_state) + self.__dict__.update(own_state) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2e56cb972b6d300f6caea34d3ec893ca6916b0f Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7ce6687efc41af5e09f3a5f220f2dc071c77dc --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.py @@ -0,0 +1,179 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +Helper functions for table development, mostly creating useful +tables for testing. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from itertools import cycle +import string +import numpy as np + +from .table import Table, Column +from ..extern.six.moves import zip, range +from ..utils.data_info import ParentDtypeInfo + + +class TimingTables(object): + """ + Object which contains two tables and various other attributes that + are useful for timing and other API tests. + """ + + def __init__(self, size=1000, masked=False): + self.masked = masked + + # Initialize table + self.table = Table(masked=self.masked) + + # Create column with mixed types + np.random.seed(12345) + self.table['i'] = np.arange(size) + self.table['a'] = np.random.random(size) # float + self.table['b'] = np.random.random(size) > 0.5 # bool + self.table['c'] = np.random.random((size, 10)) # 2d column + self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size) + + self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'} + self.extra_column = np.random.randint(0, 100, size) + self.row_indices = np.where(self.table['a'] > 0.9)[0] + self.table_grouped = self.table.group_by('d') + + # Another table for testing joining + self.other_table = Table(masked=self.masked) + self.other_table['i'] = np.arange(1, size, 3) + self.other_table['f'] = np.random.random() + self.other_table.sort('f') + + # Another table for testing hstack + self.other_table_2 = Table(masked=self.masked) + self.other_table_2['g'] = np.random.random(size) + self.other_table_2['h'] = np.random.random((size, 10)) + + self.bool_mask = self.table['a'] > 0.6 + + +def simple_table(size=3, cols=None, kinds='ifS', masked=False): + """ + Return a simple table for testing. 
+ + Example + -------- + :: + + >>> from astropy.table.table_helpers import simple_table + >>> print(simple_table(3, 6, masked=True, kinds='ifOS')) + a b c d e f + --- --- -------- --- --- --- + -- 1.0 {'c': 2} -- 5 5.0 + 2 2.0 -- e 6 -- + 3 -- {'e': 4} f -- 7.0 + + Parameters + ---------- + size : int + Number of table rows + cols : int, optional + Number of table columns. Defaults to number of kinds. + kinds : str + String consisting of the column dtype.kinds. This string + will be cycled through to generate the column dtype. + The allowed values are 'i', 'f', 'S', 'O'. + + Returns + ------- + out : `Table` + New table with appropriate characteristics + """ + if cols is None: + cols = len(kinds) + if cols > 26: + raise ValueError("Max 26 columns in SimpleTable") + + columns = [] + names = [chr(ord('a') + ii) for ii in range(cols)] + letters = np.array([c for c in string.ascii_letters]) + for jj, kind in zip(range(cols), cycle(kinds)): + if kind == 'i': + data = np.arange(1, size + 1, dtype=np.int64) + jj + elif kind == 'f': + data = np.arange(size, dtype=np.float64) + jj + elif kind == 'S': + indices = (np.arange(size) + jj) % len(letters) + data = letters[indices] + elif kind == 'O': + indices = (np.arange(size) + jj) % len(letters) + vals = letters[indices] + data = [{val: index} for val, index in zip(vals, indices)] + else: + raise ValueError('Unknown data kind') + columns.append(Column(data)) + + table = Table(columns, names=names, masked=masked) + if masked: + for ii, col in enumerate(table.columns.values()): + mask = np.array((np.arange(size) + ii) % 3, dtype=bool) + col.mask = ~mask + + return table + + +def complex_table(): + """ + Return a masked table from the io.votable test set that has a wide variety + of stressing types. + """ + from ..utils.data import get_pkg_data_filename + from ..io.votable.table import parse + import warnings + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'), + pedantic=False) + first_table = votable.get_first_table() + table = first_table.to_table() + + return table + + +class ArrayWrapper(object): + """ + Minimal mixin using a simple wrapper around a numpy array + """ + info = ParentDtypeInfo() + + def __init__(self, data): + self.data = np.array(data) + if 'info' in getattr(data, '__dict__', ()): + self.info = data.info + + def __getitem__(self, item): + if isinstance(item, (int, np.integer)): + out = self.data[item] + else: + out = self.__class__(self.data[item]) + if 'info' in self.__dict__: + out.info = self.info + return out + + def __setitem__(self, item, value): + self.data[item] = value + + def __len__(self): + return len(self.data) + + @property + def dtype(self): + return self.data.dtype + + @property + def shape(self): + return self.data.shape + + def __repr__(self): + return ("<{0} name='{1}' data={2}>" + .format(self.__class__.__name__, self.info.name, self.data)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.pyc new file mode 100644 index 0000000000000000000000000000000000000000..185dbaf97e71e75e3322c3e486ccfca0cff1ce2e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/table_helpers.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/__init__.py 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658aefb8947d62fde7bf35131e5c089447cc5237 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/conftest.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/conftest.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c98ad160a5e74163122607aaf20bee7b6f029833 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/conftest.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_column.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_column.py new file mode 100644 index 0000000000000000000000000000000000000000..380a9d0dc4f87b36dbbd104e52b8213e7b1ebffe --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_column.py @@ -0,0 +1,836 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import operator + +import pytest +import numpy as np + +from ...tests.helper import assert_follows_unicode_guidelines, catch_warnings +from ... import table +from ... import units as u +from ...extern import six + + +class TestColumn(): + + def test_subclass(self, Column): + c = Column(name='a') + assert isinstance(c, np.ndarray) + c2 = c * 2 + assert isinstance(c2, Column) + assert isinstance(c2, np.ndarray) + + def test_numpy_ops(self, Column): + """Show that basic numpy operations with Column behave sensibly""" + + arr = np.array([1, 2, 3]) + c = Column(arr, name='a') + + for op, test_equal in ((operator.eq, True), + (operator.ne, False), + (operator.ge, True), + (operator.gt, False), + (operator.le, True), + (operator.lt, False)): + for eq in (op(c, arr), op(arr, c)): + + assert np.all(eq) if test_equal else not np.any(eq) + assert len(eq) == 3 + if Column is table.Column: + assert type(eq) == np.ndarray + else: + assert type(eq) == np.ma.core.MaskedArray + assert eq.dtype.str == '|b1' + + lt = c - 1 < arr + assert np.all(lt) + + def test_numpy_boolean_ufuncs(self, Column): + """Show that basic numpy operations with Column behave sensibly""" + + arr = np.array([1, 2, 3]) + c = Column(arr, name='a') + + for ufunc, test_true in ((np.isfinite, True), + (np.isinf, False), + (np.isnan, False), + (np.sign, True), + (np.signbit, False)): + result = ufunc(c) + assert len(result) == len(c) + assert np.all(result) if test_true else not np.any(result) + if Column is table.Column: + assert type(result) == np.ndarray + else: + assert type(result) == np.ma.core.MaskedArray + if ufunc is not np.sign: + assert result.dtype.str == '|b1' + + def test_view(self, Column): + c = np.array([1, 2, 3], dtype=np.int64).view(Column) + assert repr(c) == "<{0} dtype='int64' length=3>\n1\n2\n3".format(Column.__name__) + + def test_format(self, Column): + """Show that the formatted output from str() works""" + from ... 
import conf + with conf.set_temp('max_lines', 8): + c1 = Column(np.arange(2000), name='a', dtype=float, + format='%6.2f') + assert str(c1).splitlines() == [' a ', + '-------', + ' 0.00', + ' 1.00', + ' ...', + '1998.00', + '1999.00', + 'Length = 2000 rows'] + + def test_convert_numpy_array(self, Column): + d = Column([1, 2, 3], name='a', dtype='i8') + + np_data = np.array(d) + assert np.all(np_data == d) + np_data = np.array(d, copy=False) + assert np.all(np_data == d) + np_data = np.array(d, dtype='i4') + assert np.all(np_data == d) + + def test_convert_unit(self, Column): + d = Column([1, 2, 3], name='a', dtype="f8", unit="m") + d.convert_unit_to("km") + assert np.all(d.data == [0.001, 0.002, 0.003]) + + def test_array_wrap(self): + """Test that the __array_wrap__ method converts a reduction ufunc + output that has a different shape into an ndarray view. Without this a + method call like c.mean() returns a Column array object with length=1.""" + # Mean and sum for a 1-d float column + c = table.Column(name='a', data=[1., 2., 3.]) + assert np.allclose(c.mean(), 2.0) + assert isinstance(c.mean(), (np.floating, float)) + assert np.allclose(c.sum(), 6.) + assert isinstance(c.sum(), (np.floating, float)) + + # Non-reduction ufunc preserves Column class + assert isinstance(np.cos(c), table.Column) + + # Sum for a 1-d int column + c = table.Column(name='a', data=[1, 2, 3]) + assert np.allclose(c.sum(), 6) + assert isinstance(c.sum(), (np.integer, int)) + + # Sum for a 2-d int column + c = table.Column(name='a', data=[[1, 2, 3], + [4, 5, 6]]) + assert c.sum() == 21 + assert isinstance(c.sum(), (np.integer, int)) + assert np.all(c.sum(axis=0) == [5, 7, 9]) + assert c.sum(axis=0).shape == (3,) + assert isinstance(c.sum(axis=0), np.ndarray) + + # Sum and mean for a 1-d masked column + c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1]) + assert np.allclose(c.mean(), 1.5) + assert isinstance(c.mean(), (np.floating, float)) + assert np.allclose(c.sum(), 3.) + assert isinstance(c.sum(), (np.floating, float)) + + def test_name_none(self, Column): + """Can create a column without supplying name, which defaults to None""" + c = Column([1, 2]) + assert c.name is None + assert np.all(c == np.array([1, 2])) + + def test_quantity_init(self, Column): + + c = Column(data=np.array([1, 2, 3]) * u.m) + assert np.all(c.data == np.array([1, 2, 3])) + assert np.all(c.unit == u.m) + + c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm) + assert np.all(c.data == np.array([100, 200, 300])) + assert np.all(c.unit == u.cm) + + def test_attrs_survive_getitem_after_change(self, Column): + """ + Test for issue #3023: when calling getitem with a MaskedArray subclass + the original object attributes are not copied. + """ + c1 = Column([1, 2, 3], name='a', unit='m', format='i', + description='aa', meta={'a': 1}) + c1.name = 'b' + c1.unit = 'km' + c1.format = 'i2' + c1.description = 'bb' + c1.meta = {'bbb': 2} + + for item in (slice(None, None), slice(None, 1), np.array([0, 2]), + np.array([False, True, False])): + c2 = c1[item] + assert c2.name == 'b' + assert c2.unit is u.km + assert c2.format == 'i2' + assert c2.description == 'bb' + assert c2.meta == {'bbb': 2} + + # Make sure that calling getitem resulting in a scalar does + # not copy attributes. + val = c1[1] + for attr in ('name', 'unit', 'format', 'description', 'meta'): + assert not hasattr(val, attr) + + def test_to_quantity(self, Column): + d = Column([1, 2, 3], name='a', dtype="f8", unit="m") + + assert np.all(d.quantity == ([1, 2, 3.] 
* u.m)) + assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value) + assert np.all(d.quantity == d.to('m')) + assert np.all(d.quantity.value == d.to('m').value) + + np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value) + np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value) + + np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value, + [299.792458, 149.896229, 99.93081933]) + + d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None) + with pytest.raises(u.UnitsError): + d_nounit.to(u.km) + assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3])) + + # make sure the correct copy/no copy behavior is happening + q = [1, 3, 5]*u.km + + # to should always make a copy + d.to(u.km)[:] = q + np.testing.assert_allclose(d, [1, 2, 3]) + + # explcit copying of the quantity should not change the column + d.quantity.copy()[:] = q + np.testing.assert_allclose(d, [1, 2, 3]) + + # but quantity directly is a "view", accessing the underlying column + d.quantity[:] = q + np.testing.assert_allclose(d, [1000, 3000, 5000]) + + # view should also work for integers + d2 = Column([1, 2, 3], name='a', dtype=int, unit="m") + d2.quantity[:] = q + np.testing.assert_allclose(d2, [1000, 3000, 5000]) + + # but it should fail for strings or other non-numeric tables + d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m") + with pytest.raises(TypeError): + d3.quantity + + def test_item_access_type(self, Column): + """ + Tests for #3095, which forces integer item access to always return a plain + ndarray or MaskedArray, even in the case of a multi-dim column. + """ + integer_types = (int, long, np.int) if six.PY2 else (int, np.int) + + for int_type in integer_types: + c = Column([[1, 2], [3, 4]]) + i0 = int_type(0) + i1 = int_type(1) + assert np.all(c[i0] == [1, 2]) + assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray) + assert c[i0].shape == (2,) + + c01 = c[i0:i1] + assert np.all(c01 == [[1, 2]]) + assert isinstance(c01, Column) + assert c01.shape == (1, 2) + + c = Column([1, 2]) + assert np.all(c[i0] == 1) + assert isinstance(c[i0], np.integer) + assert c[i0].shape == () + + c01 = c[i0:i1] + assert np.all(c01 == [1]) + assert isinstance(c01, Column) + assert c01.shape == (1,) + + def test_insert_basic(self, Column): + c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + + # Basic insert + c1 = c.insert(1, 100) + assert np.all(c1 == [0, 100, 1, 2]) + assert c1.attrs_equal(c) + assert type(c) is type(c1) + if hasattr(c1, 'mask'): + assert c1.data.shape == c1.mask.shape + + c1 = c.insert(-1, 100) + assert np.all(c1 == [0, 1, 100, 2]) + + c1 = c.insert(3, 100) + assert np.all(c1 == [0, 1, 2, 100]) + + c1 = c.insert(-3, 100) + assert np.all(c1 == [100, 0, 1, 2]) + + c1 = c.insert(1, [100, 200, 300]) + if hasattr(c1, 'mask'): + assert c1.data.shape == c1.mask.shape + + # Out of bounds index + with pytest.raises((ValueError, IndexError)): + c1 = c.insert(-4, 100) + with pytest.raises((ValueError, IndexError)): + c1 = c.insert(4, 100) + + def test_insert_axis(self, Column): + """Insert with non-default axis kwarg""" + c = Column([[1, 2], [3, 4]]) + + c1 = c.insert(1, [5, 6], axis=None) + assert np.all(c1 == [1, 5, 6, 2, 3, 4]) + + c1 = c.insert(1, [5, 6], axis=1) + assert np.all(c1 == [[1, 5, 2], [3, 6, 4]]) + + def test_insert_multidim(self, Column): + c = Column([[1, 2], + [3, 4]], name='a', dtype=int) + + # Basic insert + c1 = 
c.insert(1, [100, 200]) + assert np.all(c1 == [[1, 2], [100, 200], [3, 4]]) + + # Broadcast + c1 = c.insert(1, 100) + assert np.all(c1 == [[1, 2], [100, 100], [3, 4]]) + + # Wrong shape + with pytest.raises(ValueError): + c1 = c.insert(1, [100, 200, 300]) + + def test_insert_object(self, Column): + c = Column(['a', 1, None], name='a', dtype=object) + + # Basic insert + c1 = c.insert(1, [100, 200]) + assert np.all(c1 == ['a', [100, 200], 1, None]) + + def test_insert_masked(self): + c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999, + mask=[False, True, False]) + + # Basic insert + c1 = c.insert(1, 100) + assert np.all(c1.data.data == [0, 100, 1, 2]) + assert c1.fill_value == 9999 + assert np.all(c1.data.mask == [False, False, True, False]) + assert type(c) is type(c1) + + for mask in (False, True): + c1 = c.insert(1, 100, mask=mask) + assert np.all(c1.data.data == [0, 100, 1, 2]) + assert np.all(c1.data.mask == [False, mask, True, False]) + + def test_insert_masked_multidim(self): + c = table.MaskedColumn([[1, 2], + [3, 4]], name='a', dtype=int) + + c1 = c.insert(1, [100, 200], mask=True) + assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]]) + assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]]) + + c1 = c.insert(1, [100, 200], mask=[True, False]) + assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]]) + assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]]) + + with pytest.raises(ValueError): + c1 = c.insert(1, [100, 200], mask=[True, False, True]) + + def test_mask_on_non_masked_table(self): + """ + When table is not masked and trying to set mask on column then + it's Raise AttributeError. + """ + + t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8')) + + with pytest.raises(AttributeError): + t['a'].mask = [True, False] + + +class TestAttrEqual(): + """Bunch of tests originally from ATpy that test the attrs_equal method.""" + + def test_5(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy') + c2 = Column(name='a', dtype=int, unit='mJy') + assert c1.attrs_equal(c2) + + def test_6(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert c1.attrs_equal(c2) + + def test_7(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='b', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_8(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=float, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_9(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_10(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%g', + description='test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) 
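+
+    def test_attrs_equal_ignores_data(self, Column):
+        # Illustrative sketch (not from the upstream astropy test suite):
+        # attrs_equal compares only the column attributes exercised above
+        # (name, unit, dtype, format, description, meta), never the data
+        # values themselves.
+        c1 = Column([1, 2], name='a', dtype=int, unit='mJy')
+        c2 = Column([3, 4], name='a', dtype=int, unit='mJy')
+        assert c1.attrs_equal(c2)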
+ + def test_11(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='another test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_12(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'e': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_13(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 9, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_col_and_masked_col(self): + c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert c1.attrs_equal(c2) + assert c2.attrs_equal(c1) + +# Check that the meta descriptor is working as expected. The MetaBaseTest class +# takes care of defining all the tests, and we simply have to define the class +# and any minimal set of args to pass. + + +from ...utils.tests.test_metadata import MetaBaseTest + + +class TestMetaColumn(MetaBaseTest): + test_class = table.Column + args = () + + +class TestMetaMaskedColumn(MetaBaseTest): + test_class = table.MaskedColumn + args = () + + +def test_getitem_metadata_regression(): + """ + Regression test for #1471: MaskedArray does not call __array_finalize__ so + the meta-data was not getting copied over. By overloading _update_from we + are able to work around this bug. 
+ """ + + # Make sure that meta-data gets propagated with __getitem__ + + c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + assert c[1:2].name == 'a' + assert c[1:2].description == 'b' + assert c[1:2].unit == 'm' + assert c[1:2].format == '%i' + assert c[1:2].meta['c'] == 8 + + c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + assert c[1:2].name == 'a' + assert c[1:2].description == 'b' + assert c[1:2].unit == 'm' + assert c[1:2].format == '%i' + assert c[1:2].meta['c'] == 8 + + # As above, but with take() - check the method and the function + + c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + for subset in [c.take([0, 1]), np.take(c, [0, 1])]: + assert subset.name == 'a' + assert subset.description == 'b' + assert subset.unit == 'm' + assert subset.format == '%i' + assert subset.meta['c'] == 8 + + # Metadata isn't copied for scalar values + for subset in [c.take(0), np.take(c, 0)]: + assert subset == 1 + assert subset.shape == () + assert not isinstance(subset, table.Column) + + c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + for subset in [c.take([0, 1]), np.take(c, [0, 1])]: + assert subset.name == 'a' + assert subset.description == 'b' + assert subset.unit == 'm' + assert subset.format == '%i' + assert subset.meta['c'] == 8 + + # Metadata isn't copied for scalar values + for subset in [c.take(0), np.take(c, 0)]: + assert subset == 1 + assert subset.shape == () + assert not isinstance(subset, table.MaskedColumn) + + +def test_unicode_guidelines(): + arr = np.array([1, 2, 3]) + c = table.Column(arr, name='a') + + assert_follows_unicode_guidelines(c) + + +def test_scalar_column(): + """ + Column is not designed to hold scalars, but for numpy 1.6 this can happen: + + >> type(np.std(table.Column([1, 2]))) + astropy.table.column.Column + """ + c = table.Column(1.5) + assert repr(c) == '1.5' + assert str(c) == '1.5' + + +def test_qtable_column_conversion(): + """ + Ensures that a QTable that gets assigned a unit switches to be Quantity-y + """ + qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f']) + + assert isinstance(qtab['i'], table.column.Column) + assert isinstance(qtab['f'], table.column.Column) + + qtab['i'].unit = 'km/s' + assert isinstance(qtab['i'], u.Quantity) + assert isinstance(qtab['f'], table.column.Column) + + # should follow from the above, but good to make sure as a #4497 regression test + assert isinstance(qtab['i'][0], u.Quantity) + assert isinstance(qtab[0]['i'], u.Quantity) + assert not isinstance(qtab['f'][0], u.Quantity) + assert not isinstance(qtab[0]['f'], u.Quantity) + + # Regression test for #5342: if a function unit is assigned, the column + # should become the appropriate FunctionQuantity subclass. + qtab['f'].unit = u.dex(u.cm/u.s**2) + assert isinstance(qtab['f'], u.Dex) + + +@pytest.mark.parametrize('masked', [True, False]) +def test_string_truncation_warning(masked): + """ + Test warnings associated with in-place assignment to a string + column that results in truncation of the right hand side. 
+ """ + t = table.Table([['aa', 'bb']], names=['a'], masked=masked) + + with catch_warnings() as w: + from inspect import currentframe, getframeinfo + t['a'][1] = 'cc' + assert len(w) == 0 + + t['a'][:] = 'dd' + assert len(w) == 0 + + with catch_warnings() as w: + frameinfo = getframeinfo(currentframe()) + t['a'][0] = 'eee' # replace item with string that gets truncated + assert t['a'][0] == 'ee' + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + # Make sure the warning points back to the user code line + assert w[0].lineno == frameinfo.lineno + 1 + assert w[0].category is table.StringTruncateWarning + assert 'test_column' in w[0].filename + + with catch_warnings() as w: + t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated + assert np.all(t['a'] == ['ff', 'gg']) + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + with catch_warnings() as w: + # Test the obscure case of assigning from an array that was originally + # wider than any of the current elements (i.e. dtype is U4 but actual + # elements are U1 at the time of assignment). + val = np.array(['ffff', 'gggg']) + val[:] = ['f', 'g'] + t['a'][:] = val + assert np.all(t['a'] == ['f', 'g']) + assert len(w) == 0 + + +def test_string_truncation_warning_masked(): + """ + Test warnings associated with in-place assignment to a string + to a masked column, specifically where the right hand side + contains np.ma.masked. + """ + + # Test for strings, but also cover assignment of np.ma.masked to + # int and float masked column setting. This was previously only + # covered in an unrelated io.ascii test (test_line_endings) which + # showed an unexpected difference between handling of str and numeric + # masked arrays. + for values in (['a', 'b'], [1, 2], [1.0, 2.0]): + mc = table.MaskedColumn(values) + + with catch_warnings() as w: + mc[1] = np.ma.masked + assert len(w) == 0 + assert np.all(mc.mask == [False, True]) + + mc[:] = np.ma.masked + assert len(w) == 0 + assert np.all(mc.mask == [True, True]) + + mc = table.MaskedColumn(['aa', 'bb']) + + with catch_warnings() as w: + mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated + assert mc[1] == 'gg' + assert np.all(mc.mask == [True, False]) + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + +@pytest.mark.skipif('six.PY2') +@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) +def test_col_unicode_sandwich_create_from_str(Column): + """ + Create a bytestring Column from strings (including unicode) in Py3. + """ + # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. + # Stress the system by injecting non-ASCII characters. + uba = u'bä' + c = Column([uba, 'def'], dtype='S') + assert c.dtype.char == 'S' + assert c[0] == uba + assert isinstance(c[0], str) + assert isinstance(c[:0], table.Column) + assert np.all(c[:2] == np.array([uba, 'def'])) + + +@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) +def test_col_unicode_sandwich_bytes(Column): + """ + Create a bytestring Column from bytes and ensure that it works in Python 3 in + a convenient way like in Python 2. + """ + # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. + # Stress the system by injecting non-ASCII characters. 
+ uba = 'ba' if six.PY2 else u'bä' + uba8 = uba.encode('utf-8') + c = Column([uba8, b'def']) + assert c.dtype.char == 'S' + assert c[0] == uba8 if six.PY2 else uba # Can compare utf-8 directly only in PY3 + assert isinstance(c[0], str) + assert isinstance(c[:0], table.Column) + assert np.all(c[:2] == np.array([uba, 'def'])) + + assert isinstance(c[:], table.Column) + assert c[:].dtype.char == 'S' + + # Array / list comparisons + if not six.PY2: + assert np.all(c == [uba, 'def']) + + ok = c == [uba8, b'def'] + assert type(ok) is type(c.data) + assert ok.dtype.char == '?' + assert np.all(ok) + + assert np.all(c == np.array([uba, u'def'])) + if not six.PY2: + assert np.all(c == np.array([uba8, b'def'])) + + # Scalar compare + cmps = (uba8,) if six.PY2 else (uba, uba8) + for cmp in cmps: + ok = c == cmp + assert type(ok) is type(c.data) + assert np.all(ok == [True, False]) + + +def test_col_unicode_sandwich_unicode(): + """ + Sanity check that Unicode Column behaves normally. + """ + # On Py2 the unicode must be ASCII-compatible, else the final test fails. + uba = 'ba' if six.PY2 else u'bä' + uba8 = uba.encode('utf-8') + + c = table.Column([uba, 'def'], dtype='U') + assert c[0] == uba + assert isinstance(c[:0], table.Column) + assert isinstance(c[0], six.text_type) + assert np.all(c[:2] == np.array([uba, 'def'])) + + assert isinstance(c[:], table.Column) + assert c[:].dtype.char == 'U' + + ok = c == [uba, 'def'] + assert type(ok) == np.ndarray + assert ok.dtype.char == '?' + assert np.all(ok) + + # In PY2 unicode is equal to bytestrings but not in PY3 + if six.PY2: + assert np.all(c == [uba8, b'def']) + else: + assert np.all(c != [uba8, b'def']) + + +def test_masked_col_unicode_sandwich(): + """ + Create a bytestring MaskedColumn and ensure that it works in Python 3 in + a convenient way like in Python 2. + """ + c = table.MaskedColumn([b'abc', b'def']) + c[1] = np.ma.masked + assert isinstance(c[:0], table.MaskedColumn) + assert isinstance(c[0], str) + + assert c[0] == 'abc' + assert c[1] is np.ma.masked + + assert isinstance(c[:], table.MaskedColumn) + assert c[:].dtype.char == 'S' + + ok = c == ['abc', 'def'] + assert ok[0] == True + assert ok[1] is np.ma.masked + assert np.all(c == [b'abc', b'def']) + assert np.all(c == np.array([u'abc', u'def'])) + assert np.all(c == np.array([b'abc', b'def'])) + + for cmp in (u'abc', b'abc'): + ok = c == cmp + assert type(ok) is np.ma.MaskedArray + assert ok[0] == True + assert ok[1] is np.ma.masked + + +@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) +def test_unicode_sandwich_set(Column): + """ + Test setting + """ + uba = 'ba' if six.PY2 else u'bä' + + c = Column([b'abc', b'def']) + + c[0] = b'aa' + assert np.all(c == [u'aa', u'def']) + + c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding + assert np.all(c == [uba, u'def']) + assert c.pformat() == [u'None', u'----', ' ' + uba, u' def'] + + c[:] = b'cc' + assert np.all(c == [u'cc', u'cc']) + + c[:] = uba + assert np.all(c == [uba, uba]) + + c[:] = '' + c[:] = [uba, b'def'] + assert np.all(c == [uba, b'def']) + + +@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column]) +@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list]) +def test_unicode_sandwich_compare(class1, class2): + """Test that comparing a bytestring Column/MaskedColumn with various + str (unicode) object types gives the expected result. Tests #6838. 
+ """ + obj1 = class1([b'a', b'c']) + if class2 is str: + obj2 = str('a') + elif class2 is list: + obj2 = ['a', 'b'] + else: + obj2 = class2(['a', 'b']) + + if six.PY2 and class2 == str: + return pytest.skip() + + assert np.all((obj1 == obj2) == [True, False]) + assert np.all((obj2 == obj1) == [True, False]) + + assert np.all((obj1 != obj2) == [False, True]) + assert np.all((obj2 != obj1) == [False, True]) + + assert np.all((obj1 > obj2) == [False, True]) + assert np.all((obj2 > obj1) == [False, False]) + + assert np.all((obj1 <= obj2) == [True, False]) + assert np.all((obj2 <= obj1) == [True, True]) + + assert np.all((obj1 < obj2) == [False, False]) + assert np.all((obj2 < obj1) == [False, True]) + + assert np.all((obj1 >= obj2) == [True, True]) + assert np.all((obj2 >= obj1) == [True, False]) + + +def test_unicode_sandwich_masked_compare(): + """Test the fix for #6839 from #6899.""" + c1 = table.MaskedColumn(['a', 'b', 'c', 'd'], + mask=[True, False, True, False]) + c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'], + mask=[True, True, False, False]) + + for cmp in ((c1 == c2), (c2 == c1)): + assert cmp[0] is np.ma.masked + assert cmp[1] is np.ma.masked + assert cmp[2] is np.ma.masked + assert cmp[3] + + for cmp in ((c1 != c2), (c2 != c1)): + assert cmp[0] is np.ma.masked + assert cmp[1] is np.ma.masked + assert cmp[2] is np.ma.masked + assert not cmp[3] + + # Note: comparisons <, >, >=, <= fail to return a masked array entirely, + # see https://github.com/numpy/numpy/issues/10092. diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_groups.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..8091c53e2bb251b9ba0ed20ea9d079fa7ab8444f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_groups.py @@ -0,0 +1,581 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import pytest +import numpy as np + +from ...tests.helper import catch_warnings +from ...table import Table, Column +from ...utils.exceptions import AstropyUserWarning + + +def sort_eq(list1, list2): + return sorted(list1) == sorted(list2) + + +def test_column_group_by(T1): + for masked in (False, True): + t1 = Table(T1, masked=masked) + t1a = t1['a'].copy() + + # Group by a Column (i.e. numpy array) + t1ag = t1a.group_by(t1['a']) + assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8])) + + # Group by a Table + t1ag = t1a.group_by(t1['a', 'b']) + assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + + # Group by a numpy structured array + t1ag = t1a.group_by(t1['a', 'b'].as_array()) + assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + + +def test_table_group_by(T1): + """ + Test basic table group_by functionality for possible key types and for + masked/unmasked tables. 
+ """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + # Group by a single column key specified by name + tg = t1.group_by('a') + assert np.all(tg.groups.indices == np.array([0, 1, 4, 8])) + assert str(tg.groups) == "" + assert str(tg['a'].groups) == "" + + # Sorted by 'a' and in original order for rest + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3'] + assert tg.meta['ta'] == 1 + assert tg['c'].meta['a'] == 1 + assert tg['c'].description == 'column c' + + # Group by a table column + tg2 = t1.group_by(t1['a']) + assert tg.pformat() == tg2.pformat() + + # Group by two columns spec'd by name + for keys in (['a', 'b'], ('a', 'b')): + tg = t1.group_by(keys) + assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + # Sorted by 'a', 'b' and in original order for rest + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a 0.0 4', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 1 b 3.0 5', + ' 2 a 4.0 3', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 c 7.0 0'] + + # Group by a Table + tg2 = t1.group_by(t1['a', 'b']) + assert tg.pformat() == tg2.pformat() + + # Group by a structured array + tg2 = t1.group_by(t1['a', 'b'].as_array()) + assert tg.pformat() == tg2.pformat() + + # Group by a simple ndarray + tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0])) + assert np.all(tg.groups.indices == np.array([0, 4, 7, 8])) + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 2 c 7.0 0', + ' 2 b 6.0 2', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 b 5.0 1', + ' 2 a 4.0 3', + ' 1 b 3.0 5', + ' 0 a 0.0 4'] + + +def test_groups_keys(T1): + tg = T1.group_by('a') + keys = tg.groups.keys + assert keys.dtype.names == ('a',) + assert np.all(keys['a'] == np.array([0, 1, 2])) + + tg = T1.group_by(['a', 'b']) + keys = tg.groups.keys + assert keys.dtype.names == ('a', 'b') + assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2])) + assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c'])) + + # Grouping by Column ignores column name + tg = T1.group_by(T1['b']) + keys = tg.groups.keys + assert keys.dtype.names is None + + +def test_groups_iterator(T1): + tg = T1.group_by('a') + for ii, group in enumerate(tg.groups): + assert group.pformat() == tg.groups[ii].pformat() + assert group['a'][0] == tg['a'][tg.groups.indices[ii]] + + +def test_grouped_copy(T1): + """ + Test that copying a table or column copies the groups properly + """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + tg = t1.group_by('a') + tgc = tg.copy() + assert np.all(tgc.groups.indices == tg.groups.indices) + assert np.all(tgc.groups.keys == tg.groups.keys) + + tac = tg['a'].copy() + assert np.all(tac.groups.indices == tg['a'].groups.indices) + + c1 = t1['a'].copy() + gc1 = c1.group_by(t1['a']) + gc1c = gc1.copy() + assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8])) + + +def test_grouped_slicing(T1): + """ + Test that slicing a table removes previous grouping + """ + + for masked in (False, True): + t1 = Table(T1, masked=masked) + + # Regular slice of a table + tg = t1.group_by('a') + tg2 = tg[3:5] + assert np.all(tg2.groups.indices == np.array([0, len(tg2)])) + assert tg2.groups.keys is None + + +def test_group_column_from_table(T1): + """ + Group a column that is part of a table + """ + cg = T1['c'].group_by(np.array(T1['a'])) + assert np.all(cg.groups.keys == np.array([0, 1, 2])) + assert np.all(cg.groups.indices == np.array([0, 1, 4, 8])) + + +def 
test_table_groups_mask_index(T1): + """ + Use boolean mask as item in __getitem__ for groups + """ + for masked in (False, True): + t1 = Table(T1, masked=masked).group_by('a') + + t2 = t1.groups[np.array([True, False, True])] + assert len(t2.groups) == 2 + assert t2.groups[0].pformat() == t1.groups[0].pformat() + assert t2.groups[1].pformat() == t1.groups[2].pformat() + assert np.all(t2.groups.keys['a'] == np.array([0, 2])) + + +def test_table_groups_array_index(T1): + """ + Use numpy array as item in __getitem__ for groups + """ + for masked in (False, True): + t1 = Table(T1, masked=masked).group_by('a') + + t2 = t1.groups[np.array([0, 2])] + assert len(t2.groups) == 2 + assert t2.groups[0].pformat() == t1.groups[0].pformat() + assert t2.groups[1].pformat() == t1.groups[2].pformat() + assert np.all(t2.groups.keys['a'] == np.array([0, 2])) + + +def test_table_groups_slicing(T1): + """ + Test that slicing table groups works + """ + + for masked in (False, True): + t1 = Table(T1, masked=masked).group_by('a') + + # slice(0, 2) + t2 = t1.groups[0:2] + assert len(t2.groups) == 2 + assert t2.groups[0].pformat() == t1.groups[0].pformat() + assert t2.groups[1].pformat() == t1.groups[1].pformat() + assert np.all(t2.groups.keys['a'] == np.array([0, 1])) + + # slice(1, 2) + t2 = t1.groups[1:2] + assert len(t2.groups) == 1 + assert t2.groups[0].pformat() == t1.groups[1].pformat() + assert np.all(t2.groups.keys['a'] == np.array([1])) + + # slice(0, 3, 2) + t2 = t1.groups[0:3:2] + assert len(t2.groups) == 2 + assert t2.groups[0].pformat() == t1.groups[0].pformat() + assert t2.groups[1].pformat() == t1.groups[2].pformat() + assert np.all(t2.groups.keys['a'] == np.array([0, 2])) + + +def test_grouped_item_access(T1): + """ + Test that column slicing preserves grouping + """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + + # Regular slice of a table + tg = t1.group_by('a') + tgs = tg['a', 'c', 'd'] + assert np.all(tgs.groups.keys == tg.groups.keys) + assert np.all(tgs.groups.indices == tg.groups.indices) + tgsa = tgs.groups.aggregate(np.sum) + assert tgsa.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 6.0 18', + ' 2 22.0 6'] + + tgs = tg['c', 'd'] + assert np.all(tgs.groups.keys == tg.groups.keys) + assert np.all(tgs.groups.indices == tg.groups.indices) + tgsa = tgs.groups.aggregate(np.sum) + assert tgsa.pformat() == [' c d ', + '---- ---', + ' 0.0 4', + ' 6.0 18', + '22.0 6'] + + +def test_mutable_operations(T1): + """ + Operations like adding or deleting a row should removing grouping, + but adding or removing or renaming a column should retain grouping. 
+ """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + + # add row + tg = t1.group_by('a') + tg.add_row((0, 'a', 3.0, 4)) + assert np.all(tg.groups.indices == np.array([0, len(tg)])) + assert tg.groups.keys is None + + # remove row + tg = t1.group_by('a') + tg.remove_row(4) + assert np.all(tg.groups.indices == np.array([0, len(tg)])) + assert tg.groups.keys is None + + # add column + tg = t1.group_by('a') + indices = tg.groups.indices.copy() + tg.add_column(Column(name='e', data=np.arange(len(tg)))) + assert np.all(tg.groups.indices == indices) + assert np.all(tg['e'].groups.indices == indices) + assert np.all(tg['e'].groups.keys == tg.groups.keys) + + # remove column (not key column) + tg = t1.group_by('a') + tg.remove_column('b') + assert np.all(tg.groups.indices == indices) + # Still has original key col names + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['a'].groups.indices == indices) + + # remove key column + tg = t1.group_by('a') + tg.remove_column('a') + assert np.all(tg.groups.indices == indices) + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['b'].groups.indices == indices) + + # rename key column + tg = t1.group_by('a') + tg.rename_column('a', 'aa') + assert np.all(tg.groups.indices == indices) + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['aa'].groups.indices == indices) + + +def test_group_by_masked(T1): + t1m = Table(T1, masked=True) + t1m['c'].mask[4] = True + t1m['d'].mask[5] = True + assert t1m.group_by('a').pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a -- 4', + ' 1 b 3.0 --', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3'] + + +def test_group_by_errors(T1): + """ + Appropriate errors get raised. + """ + # Bad column name as string + with pytest.raises(ValueError): + T1.group_by('f') + + # Bad column names in list + with pytest.raises(ValueError): + T1.group_by(['f', 'g']) + + # Wrong length array + with pytest.raises(ValueError): + T1.group_by(np.array([1, 2])) + + # Wrong type + with pytest.raises(TypeError): + T1.group_by(None) + + # Masked key column + t1 = Table(T1, masked=True) + t1['a'].mask[4] = True + with pytest.raises(ValueError): + t1.group_by('a') + + +def test_groups_keys_meta(T1): + """ + Make sure the keys meta['grouped_by_table_cols'] is working. 
+ """ + # Group by column in this table + tg = T1.group_by('a') + assert tg.groups.keys.meta['grouped_by_table_cols'] is True + assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True + assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True + assert (tg['d'].groups[np.array([False, True, True])] + .groups.keys.meta['grouped_by_table_cols'] is True) + + # Group by external Table + tg = T1.group_by(T1['a', 'b']) + assert tg.groups.keys.meta['grouped_by_table_cols'] is False + assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False + assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False + + # Group by external numpy array + tg = T1.group_by(T1['a', 'b'].as_array()) + assert not hasattr(tg.groups.keys, 'meta') + assert not hasattr(tg['c'].groups.keys, 'meta') + + # Group by Column + tg = T1.group_by(T1['a']) + assert 'grouped_by_table_cols' not in tg.groups.keys.meta + assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta + + +def test_table_aggregate(T1): + """ + Aggregate a table + """ + # Table with only summable cols + t1 = T1['a', 'c', 'd'] + tg = t1.group_by('a') + tga = tg.groups.aggregate(np.sum) + assert tga.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 6.0 18', + ' 2 22.0 6'] + # Reverts to default groups + assert np.all(tga.groups.indices == np.array([0, 3])) + assert tga.groups.keys is None + + # metadata survives + assert tga.meta['ta'] == 1 + assert tga['c'].meta['a'] == 1 + assert tga['c'].description == 'column c' + + # Aggregate with np.sum with masked elements. This results + # in one group with no elements, hence a nan result and conversion + # to float for the 'd' column. + t1m = Table(t1, masked=True) + t1m['c'].mask[4:6] = True + t1m['d'].mask[4:6] = True + tg = t1m.group_by('a') + with catch_warnings(Warning) as warning_lines: + tga = tg.groups.aggregate(np.sum) + assert warning_lines[0].category == UserWarning + assert "converting a masked element to nan" in str(warning_lines[0].message) + + assert tga.pformat() == [' a c d ', + '--- ---- ----', + ' 0 nan nan', + ' 1 3.0 13.0', + ' 2 22.0 6.0'] + + # Aggregrate with np.sum with masked elements, but where every + # group has at least one remaining (unmasked) element. Then + # the int column stays as an int. + t1m = Table(t1, masked=True) + t1m['c'].mask[5] = True + t1m['d'].mask[5] = True + tg = t1m.group_by('a') + tga = tg.groups.aggregate(np.sum) + assert tga.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 3.0 13', + ' 2 22.0 6'] + + # Aggregate with a column type that cannot by supplied to the aggregating + # function. This raises a warning but still works. 
+ tg = T1.group_by('a') + with catch_warnings(Warning) as warning_lines: + tga = tg.groups.aggregate(np.sum) + assert warning_lines[0].category == AstropyUserWarning + assert "Cannot aggregate column" in str(warning_lines[0].message) + assert tga.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 6.0 18', + ' 2 22.0 6'] + + +def test_table_aggregate_reduceat(T1): + """ + Aggregate table with functions which have a reduceat method + """ + # Comparison functions without reduceat + def np_mean(x): + return np.mean(x) + + def np_sum(x): + return np.sum(x) + + def np_add(x): + return np.add(x) + + # Table with only summable cols + t1 = T1['a', 'c', 'd'] + tg = t1.group_by('a') + # Comparison + tga_r = tg.groups.aggregate(np.sum) + tga_a = tg.groups.aggregate(np.add) + tga_n = tg.groups.aggregate(np_sum) + + assert np.all(tga_r == tga_n) + assert np.all(tga_a == tga_n) + assert tga_n.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 6.0 18', + ' 2 22.0 6'] + + tga_r = tg.groups.aggregate(np.mean) + tga_n = tg.groups.aggregate(np_mean) + assert np.all(tga_r == tga_n) + assert tga_n.pformat() == [' a c d ', + '--- --- ---', + ' 0 0.0 4.0', + ' 1 2.0 6.0', + ' 2 5.5 1.5'] + + # Binary ufunc np_add should raise warning without reduceat + t2 = T1['a', 'c'] + tg = t2.group_by('a') + + with catch_warnings(Warning) as warning_lines: + tga = tg.groups.aggregate(np_add) + assert warning_lines[0].category == AstropyUserWarning + assert "Cannot aggregate column" in str(warning_lines[0].message) + assert tga.pformat() == [' a ', + '---', + ' 0', + ' 1', + ' 2'] + + +def test_column_aggregate(T1): + """ + Aggregate a single table column + """ + for masked in (False, True): + tg = Table(T1, masked=masked).group_by('a') + tga = tg['c'].groups.aggregate(np.sum) + assert tga.pformat() == [' c ', + '----', + ' 0.0', + ' 6.0', + '22.0'] + + +def test_table_filter(): + """ + Table groups filtering + """ + def all_positive(table, key_colnames): + colnames = [name for name in table.colnames if name not in key_colnames] + for colname in colnames: + if np.any(table[colname] < 0): + return False + return True + + # Negative value in 'a' column should not filter because it is a key col + t = Table.read([' a c d', + ' -2 7.0 0', + ' -2 5.0 1', + ' 0 0.0 4', + ' 1 3.0 5', + ' 1 2.0 -6', + ' 1 1.0 7', + ' 3 3.0 5', + ' 3 -2.0 6', + ' 3 1.0 7', + ], format='ascii') + tg = t.group_by('a') + t2 = tg.groups.filter(all_positive) + assert t2.groups[0].pformat() == [' a c d ', + '--- --- ---', + ' -2 7.0 0', + ' -2 5.0 1'] + assert t2.groups[1].pformat() == [' a c d ', + '--- --- ---', + ' 0 0.0 4'] + + +def test_column_filter(): + """ + Table groups filtering + """ + def all_positive(column): + if np.any(column < 0): + return False + return True + + # Negative value in 'a' column should not filter because it is a key col + t = Table.read([' a c d', + ' -2 7.0 0', + ' -2 5.0 1', + ' 0 0.0 4', + ' 1 3.0 5', + ' 1 2.0 -6', + ' 1 1.0 7', + ' 3 3.0 5', + ' 3 -2.0 6', + ' 3 1.0 7', + ], format='ascii') + tg = t.group_by('a') + c2 = tg['c'].groups.filter(all_positive) + assert len(c2.groups) == 3 + assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0'] + assert c2.groups[1].pformat() == [' c ', '---', '0.0'] + assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0'] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_index.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_index.py new file mode 100644 index 
0000000000000000000000000000000000000000..23d3172b73dae611461c138b7244872a77087da2 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_index.py @@ -0,0 +1,463 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +import pytest +import numpy as np + +from .test_table import SetupData +from ..bst import BST, FastRBT, FastBST +from ..sorted_array import SortedArray +from ..table import QTable, Row +from ... import units as u +from ...time import Time +from ..column import BaseColumn +from ...extern.six.moves import range + +try: + import bintrees +except ImportError: + HAS_BINTREES = False +else: + HAS_BINTREES = True + + +if HAS_BINTREES: + available_engines = [BST, FastBST, FastRBT, SortedArray] +else: + available_engines = [BST, SortedArray] + + +@pytest.fixture(params=available_engines) +def engine(request): + return request.param + + +_col = [1, 2, 3, 4, 5] + + +@pytest.fixture(params=[ + _col, + u.Quantity(_col), + Time(_col, format='jyear'), +]) +def main_col(request): + return request.param + + +def assert_col_equal(col, array): + if isinstance(col, Time): + assert np.all(col == Time(array, format='jyear')) + else: + assert np.all(col == col.__class__(array)) + + +@pytest.mark.usefixtures('table_types') +class TestIndex(SetupData): + def _setup(self, main_col, table_types): + super(TestIndex, self)._setup(table_types) + self.main_col = main_col + if isinstance(main_col, u.Quantity): + self._table_type = QTable + if not isinstance(main_col, list): + self._column_type = lambda x: x # don't change mixin type + self.mutable = isinstance(main_col, (list, u.Quantity)) + + def make_col(self, name, lst): + return self._column_type(lst, name=name) + + def make_val(self, val): + if isinstance(self.main_col, Time): + return Time(val, format='jyear') + return val + + @property + def t(self): + if not hasattr(self, '_t'): + self._t = self._table_type() + self._t['a'] = self._column_type(self.main_col) + self._t['b'] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1]) + self._t['c'] = self._column_type(['7', '8', '9', '10', '11']) + return self._t + + @pytest.mark.parametrize("composite", [False, True]) + def test_table_index(self, main_col, table_types, composite, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index(('a', 'b') if composite else 'a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + if not self.mutable: + return + + # test altering table columns + t['a'][0] = 4 + t.add_row((6, 6.0, '7')) + t['a'][3] = 10 + t.remove_row(2) + t.add_row((4, 5.0, '9')) + + assert_col_equal(t['a'], np.array([4, 2, 10, 5, 6, 4])) + assert np.allclose(t['b'], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0])) + assert np.all(t['c'].data == np.array(['7', '8', '10', '11', '7', '9'])) + index = t.indices[0] + l = list(index.data.items()) + + if composite: + assert np.all(l == [((2, 5.1), [1]), + ((4, 4.0), [0]), + ((4, 5.0), [5]), + ((5, 1.1), [3]), + ((6, 6.0), [4]), + ((10, 7.0), [2])]) + else: + assert np.all(l == [((2,), [1]), + ((4,), [0, 5]), + ((5,), [3]), + ((6,), [4]), + ((10,), [2])]) + t.remove_indices('a') + assert len(t.indices) == 0 + + def test_table_slicing(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + for slice_ in ([0, 2], np.array([0, 2])): + t2 = t[slice_] + # t2 should retain an index on column 'a' + assert len(t2.indices) == 1 + 
assert_col_equal(t2['a'], [1, 3]) + + # the index in t2 should reorder row numbers after slicing + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + # however, this index should be a deep copy of t1's index + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + def test_remove_rows(self, main_col, table_types, engine): + self._setup(main_col, table_types) + if not self.mutable: + return + t = self.t + t.add_index('a', engine=engine) + + # remove individual row + t2 = t.copy() + t2.remove_rows(2) + assert_col_equal(t2['a'], [1, 2, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3]) + + # remove by list, ndarray, or slice + for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)): + t2 = t.copy() + t2.remove_rows(cut) + assert_col_equal(t2['a'], [2, 4]) + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + + with pytest.raises(ValueError): + t.remove_rows((0, 2, 4)) + + def test_col_get_slice(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + + # get slice + t2 = t[1:3] # table slice + assert_col_equal(t2['a'], [2, 3]) + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + + col_slice = t['a'][1:3] + assert_col_equal(col_slice, [2, 3]) + # true column slices discard indices + if isinstance(t['a'], BaseColumn): + assert len(col_slice.info.indices) == 0 + + # take slice of slice + t2 = t[::2] + assert_col_equal(t2['a'], np.array([1, 3, 5])) + t3 = t2[::-1] + assert_col_equal(t3['a'], np.array([5, 3, 1])) + assert np.all(t3.indices[0].sorted_data() == [2, 1, 0]) + t3 = t2[:2] + assert_col_equal(t3['a'], np.array([1, 3])) + assert np.all(t3.indices[0].sorted_data() == [0, 1]) + # out-of-bound slices + for t_empty in (t2[3:], t2[2:1], t3[2:]): + assert len(t_empty['a']) == 0 + assert np.all(t_empty.indices[0].sorted_data() == []) + + if self.mutable: + # get boolean mask + mask = t['a'] % 2 == 1 + t2 = t[mask] + assert_col_equal(t2['a'], [1, 3, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2]) + + def test_col_set_slice(self, main_col, table_types, engine): + self._setup(main_col, table_types) + if not self.mutable: + return + t = self.t + t.add_index('a', engine=engine) + + # set slice + t2 = t.copy() + t2['a'][1:3] = np.array([6, 7]) + assert_col_equal(t2['a'], np.array([1, 6, 7, 4, 5])) + assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2]) + + # change original table via slice reference + t2 = t.copy() + t3 = t2[1:3] + assert_col_equal(t3['a'], np.array([2, 3])) + assert np.all(t3.indices[0].sorted_data() == [0, 1]) + t3['a'][0] = 5 + assert_col_equal(t3['a'], np.array([5, 3])) + assert_col_equal(t2['a'], np.array([1, 5, 3, 4, 5])) + assert np.all(t3.indices[0].sorted_data() == [1, 0]) + assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4]) + + # set boolean mask + t2 = t.copy() + mask = t['a'] % 2 == 1 + t2['a'][mask] = 0. 
+ assert_col_equal(t2['a'], [0, 2, 0, 4, 0]) + assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3]) + + def test_multiple_slices(self, main_col, table_types, engine): + self._setup(main_col, table_types) + + if not self.mutable: + return + + t = self.t + t.add_index('a', engine=engine) + + for i in range(6, 51): + t.add_row((i, 1.0, 'A')) + + assert_col_equal(t['a'], [i for i in range(1, 51)]) + assert np.all(t.indices[0].sorted_data() == [i for i in range(50)]) + + evens = t[::2] + assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) + reverse = evens[::-1] + index = reverse.indices[0] + assert (index.start, index.stop, index.step) == (48, -2, -2) + assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)]) + + # modify slice of slice + reverse[-10:] = 0 + expected = np.array([i for i in range(1, 51)]) + expected[:20][expected[:20] % 2 == 1] = 0 + assert_col_equal(t['a'], expected) + assert_col_equal(evens['a'], expected[::2]) + assert_col_equal(reverse['a'], expected[::2][::-1]) + # first ten evens are now zero + assert np.all(t.indices[0].sorted_data() == + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19] + + [i for i in range(20, 50)]) + assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) + assert np.all(reverse.indices[0].sorted_data() == + [i for i in range(24, -1, -1)]) + + # try different step sizes of slice + t2 = t[1:20:2] + assert_col_equal(t2['a'], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]) + assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)]) + t3 = t2[::3] + assert_col_equal(t3['a'], [2, 8, 14, 20]) + assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3]) + t4 = t3[2::-1] + assert_col_equal(t4['a'], [14, 8, 2]) + assert np.all(t4.indices[0].sorted_data() == [2, 1, 0]) + + def test_sort(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t[::-1] # reverse table + assert_col_equal(t['a'], [5, 4, 3, 2, 1]) + t.add_index('a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0]) + + if not self.mutable: + return + + # sort table by column a + t2 = t.copy() + t2.sort('a') + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + # sort table by primary key + t2 = t.copy() + t2.sort() + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + def test_insert_row(self, main_col, table_types, engine): + self._setup(main_col, table_types) + + if not self.mutable: + return + + t = self.t + t.add_index('a', engine=engine) + t.insert_row(2, (6, 1.0, '12')) + assert_col_equal(t['a'], [1, 2, 6, 3, 4, 5]) + assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2]) + t.insert_row(1, (0, 4.0, '13')) + assert_col_equal(t['a'], [1, 0, 2, 6, 3, 4, 5]) + assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3]) + + def test_index_modes(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + + # first, no special mode + assert len(t[[1, 3]].indices) == 1 + assert len(t[::-1].indices) == 1 + assert len(self._table_type(t).indices) == 1 + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t2 = t.copy() + + # non-copy mode + with t.index_mode('discard_on_copy'): + assert len(t[[1, 3]].indices) == 0 + assert len(t[::-1].indices) == 0 + assert len(self._table_type(t).indices) == 0 + assert len(t2.copy().indices) == 1 # mode should only affect t + + 
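# [Editor's aside -- illustration only, not part of the vendored test_index.py
# hunk above or below.] The surrounding test drives Table.index_mode(), a
# context manager that temporarily changes how a table's indices behave.  A
# minimal standalone sketch of the same calls used in this file, on a small
# hypothetical table:
from astropy.table import Table

t = Table({'a': [3, 1, 2]})
t.add_index('a')                        # maintain a sorted index on column 'a'

with t.index_mode('discard_on_copy'):
    assert len(t[[0, 2]].indices) == 0  # copies/slices made here drop the index

with t.index_mode('freeze'):
    t['a'][0] = 5                       # edits leave the index untouched inside the block
assert len(t.indices) == 1              # the index survives and is refreshed once the block exits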
# make sure non-copy mode is exited correctly + assert len(t[[1, 3]].indices) == 1 + + if not self.mutable: + return + + # non-modify mode + with t.index_mode('freeze'): + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t['a'][0] = 6 + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t.add_row((2, 1.5, '12')) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t.remove_rows([1, 3]) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + assert_col_equal(t['a'], [6, 3, 5, 2]) + # mode should only affect t + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t2['a'][0] = 6 + assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0]) + + # make sure non-modify mode is exited correctly + assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0]) + + if isinstance(t['a'], BaseColumn): + assert len(t['a'][::-1].info.indices) == 0 + with t.index_mode('copy_on_getitem'): + assert len(t['a'][[1, 2]].info.indices) == 1 + # mode should only affect t + assert len(t2['a'][[1, 2]].info.indices) == 0 + + assert len(t['a'][::-1].info.indices) == 0 + assert len(t2['a'][::-1].info.indices) == 0 + + def test_index_retrieval(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + t.add_index(['a', 'c'], engine=engine) + assert len(t.indices) == 2 + assert len(t.indices['a'].columns) == 1 + assert len(t.indices['a', 'c'].columns) == 2 + + with pytest.raises(IndexError): + t.indices['b'] + + def test_col_rename(self, main_col, table_types, engine): + ''' + Checks for a previous bug in which copying a Table + with different column names raised an exception. + ''' + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + t2 = self._table_type(self.t, names=['d', 'e', 'f']) + assert len(t2.indices) == 1 + + def test_table_loc(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + + t.add_index('a', engine=engine) + t.add_index('b', engine=engine) + + t2 = t.loc[self.make_val(3)] # single label, with primary key 'a' + assert_col_equal(t2['a'], [3]) + assert isinstance(t2, Row) + + # list search + t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]] + assert_col_equal(t2['a'], [1, 4, 2]) # same order as input list + if not isinstance(main_col, Time): + # ndarray search + t2 = t.loc[np.array([1, 4, 2])] + assert_col_equal(t2['a'], [1, 4, 2]) + assert_col_equal(t2['a'], [1, 4, 2]) + t2 = t.loc[self.make_val(3): self.make_val(5)] # range search + assert_col_equal(t2['a'], [3, 4, 5]) + t2 = t.loc['b', 5.0:7.0] + assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) + # search by sorted index + t2 = t.iloc[0:2] # two smallest rows by column 'a' + assert_col_equal(t2['a'], [1, 2]) + t2 = t.iloc['b', 2:] # exclude two smallest rows in column 'b' + assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) + + for t2 in (t.loc[:], t.iloc[:]): + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + + def test_invalid_search(self, main_col, table_types, engine): + # using .loc with a value not present should raise an exception + self._setup(main_col, table_types) + t = self.t + + t.add_index('a') + with pytest.raises(KeyError): + t.loc[self.make_val(6)] + + def test_copy_index_references(self, main_col, table_types, engine): + # check against a bug in which indices were given an incorrect + # column reference when copied + self._setup(main_col, table_types) + t = self.t + + t.add_index('a') + t.add_index('b') + t2 = t.copy() + assert 
t2.indices['a'].columns[0] is t2['a'] + assert t2.indices['b'].columns[0] is t2['b'] + + def test_unique_index(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + + t.add_index('a', engine=engine, unique=True) + assert np.all(t.indices['a'].sorted_data() == [0, 1, 2, 3, 4]) + + if self.mutable: + with pytest.raises(ValueError): + t.add_row((5, 5.0, '9')) + + def test_copy_indexed_table(self, table_types): + self._setup(_col, table_types) + t = self.t + t.add_index('a') + t.add_index(['a', 'b']) + for tp in (self._table_type(t), t.copy()): + assert len(t.indices) == len(tp.indices) + for index, indexp in zip(t.indices, tp.indices): + assert np.all(index.data.data == indexp.data.data) + assert index.data.data.colnames == indexp.data.data.colnames diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_item_access.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_item_access.py new file mode 100644 index 0000000000000000000000000000000000000000..f91c5e5e90b63ab3cdc91f00a03930c281500d60 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_item_access.py @@ -0,0 +1,263 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +""" Verify item access API in: +https://github.com/astropy/astropy/wiki/Table-item-access-definition +""" + +import pytest +import numpy as np + + +@pytest.mark.usefixtures('table_data') +class BaseTestItems(): + pass + + +@pytest.mark.usefixtures('table_data') +class TestTableColumnsItems(BaseTestItems): + + def test_by_name(self, table_data): + """Access TableColumns by name and show that item access returns + a Column that refers to underlying table data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + assert self.tc['a'].name == 'a' + assert self.tc['a'][1] == 2 + assert self.tc['a'].description == 'da' + assert self.tc['a'].format == 'fa' + assert self.tc['a'].meta == {'ma': 1} + assert self.tc['a'].unit == 'ua' + assert self.tc['a'].attrs_equal(table_data.COLS[0]) + assert isinstance(self.tc['a'], table_data.Column) + + self.tc['b'][1] = 0 + assert self.t['b'][1] == 0 + + def test_by_position(self, table_data): + """Access TableColumns by position and show that item access returns + a Column that refers to underlying table data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + assert self.tc[1].name == 'b' + assert np.all(self.tc[1].data == table_data.COLS[1].data) + assert self.tc[1].description == 'db' + assert self.tc[1].format == 'fb' + assert self.tc[1].meta == {'mb': 1} + assert self.tc[1].unit == 'ub' + assert self.tc[1].attrs_equal(table_data.COLS[1]) + assert isinstance(self.tc[1], table_data.Column) + + assert self.tc[2].unit == 'ub' + + self.tc[1][1] = 0 + assert self.t['b'][1] == 0 + + def test_mult_columns(self, table_data): + """Access TableColumns with "fancy indexing" and showed that returned + TableColumns object still references original data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + tc2 = self.tc['b', 'c'] + assert tc2[1].name == 'c' + assert tc2[1][1] == 8 + assert tc2[0].name == 'b' + assert tc2[0][1] == 5 + + tc2['c'][1] = 0 + assert self.tc['c'][1] == 0 + assert self.t['c'][1] == 0 + + def test_column_slice(self, table_data): + """Access TableColumns with slice and showed that returned + TableColumns object still references original data""" + self.t = 
table_data.Table(table_data.COLS) + self.tc = self.t.columns + + tc2 = self.tc[1:3] + assert tc2[1].name == 'c' + assert tc2[1][1] == 8 + assert tc2[0].name == 'b' + assert tc2[0][1] == 5 + + tc2['c'][1] = 0 + assert self.tc['c'][1] == 0 + assert self.t['c'][1] == 0 + + +@pytest.mark.usefixtures('table_data') +class TestTableItems(BaseTestItems): + + @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)]) + def test_column(self, table_data, idx): + """Column access returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + a = self.t['a'] + assert a[idx] == 2 + a[idx] = 0 + assert self.t['a'][idx] == 0 + + @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)]) + def test_row(self, table_data, idx): + """Row access returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + row = self.t[idx] + assert row['a'] == 2 + assert row[idx] == 5 + assert row.columns['a'].attrs_equal(table_data.COLS[0]) + assert row.columns['b'].attrs_equal(table_data.COLS[1]) + assert row.columns['c'].attrs_equal(table_data.COLS[2]) + + # Check that setting by col index sets the table and row value + row[idx] = 0 + assert row[idx] == 0 + assert row['b'] == 0 + assert self.t['b'][idx] == 0 + assert self.t[idx]['b'] == 0 + + # Check that setting by col name sets the table and row value + row['a'] = 0 + assert row[0] == 0 + assert row['a'] == 0 + assert self.t['a'][1] == 0 + assert self.t[1]['a'] == 0 + + def test_empty_iterable_item(self, table_data): + """ + Table item access with [], (), or np.array([]) returns the same table + with no rows. + """ + self.t = table_data.Table(table_data.COLS) + for item in [], (), np.array([]): + t2 = self.t[item] + assert not t2 + assert len(t2) == 0 + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + + def test_table_slice(self, table_data): + """Table slice returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + t2 = self.t[1:3] + assert np.all(t2['a'] == table_data.DATA['a'][1:3]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + assert np.all(self.t['a'] == np.array([1, 0, 3])) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_fancy_index_slice(self, table_data): + """Table fancy slice returns COPY of data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + slice = np.array([0, 2]) + t2 = self.t[slice] + assert np.all(t2['a'] == table_data.DATA['a'][slice]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a'][slice]) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_list_index_slice(self, table_data): + """Table list index slice returns COPY of data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + slice = [0, 2] + t2 = self.t[slice] + assert np.all(t2['a'] == table_data.DATA['a'][slice]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert 
t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a'][slice]) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_select_columns(self, table_data): + """Select columns returns COPY of data and all column + attributes""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + # try both lists and tuples + for columns in (('a', 'c'), ['a', 'c']): + t2 = self.t[columns] + assert np.all(t2['a'] == table_data.DATA['a']) + assert np.all(t2['c'] == table_data.DATA['c']) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a']) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + + def test_select_columns_fail(self, table_data): + """Selecting a column that doesn't exist fails""" + self.t = table_data.Table(table_data.COLS) + + with pytest.raises(ValueError) as err: + self.t[['xxxx']] + assert 'Slice name(s) xxxx not valid column name(s)' in str(err) + + with pytest.raises(ValueError) as err: + self.t[['xxxx', 'yyyy']] + assert 'Slice name(s) xxxx, yyyy not valid column name(s)' in str(err) + + def test_np_where(self, table_data): + """Select rows using output of np.where""" + t = table_data.Table(table_data.COLS) + # Select last two rows + rows = np.where(t['a'] > 1.5) + t2 = t[rows] + assert np.all(t2['a'] == [2, 3]) + assert np.all(t2['b'] == [5, 6]) + assert isinstance(t2, table_data.Table) + + # Select no rows + rows = np.where(t['a'] > 100) + t2 = t[rows] + assert len(t2) == 0 + assert isinstance(t2, table_data.Table) + + def test_np_integers(self, table_data): + """ + Select rows using numpy integers. This is a regression test for a + py 3.3 failure mode + """ + t = table_data.Table(table_data.COLS) + idxs = np.random.randint(len(t), size=2) + item = t[idxs[1]] + + def test_select_bad_column(self, table_data): + """Select column name that does not exist""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + with pytest.raises(ValueError): + self.t['a', 1] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_jsviewer.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_jsviewer.py new file mode 100644 index 0000000000000000000000000000000000000000..51738e17871a8ec221e455fa082644da8c7b70f1 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_jsviewer.py @@ -0,0 +1,178 @@ +from os.path import abspath, dirname, join +import textwrap + +import pytest + +from ..table import Table +from ... import extern +from ...extern.six.moves import zip + +try: + import IPython # pylint: disable=W0611 +except ImportError: + HAS_IPYTHON = False +else: + HAS_IPYTHON = True + +EXTERN_DIR = abspath(dirname(extern.__file__)) + +REFERENCE = """ + + + + + + + + + + + + + + + + + + +%(lines)s +
ab
+ + +""" + +TPL = (' \n' + ' {0}\n' + ' {1}\n' + ' ') + + +def format_lines(col1, col2): + return '\n'.join(TPL.format(a, b) for a, b in zip(col1, col2)) + + +def test_write_jsviewer_default(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer') + ref = REFERENCE % dict( + lines=format_lines(t['a'], t['b']), + table_class='display compact', + table_id='table%s' % id(t), + length='50', + display_length='10, 25, 50, 100, 500, 1000', + datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', + datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', + jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +def test_write_jsviewer_options(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3, + jskwargs={'display_length': 5}, table_class='display hover') + ref = REFERENCE % dict( + lines=format_lines(t['a'][:3], t['b'][:3]), + table_class='display hover', + table_id='test', + length='5', + display_length='5, 10, 25, 50, 100, 500, 1000', + datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', + datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', + jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +def test_write_jsviewer_local(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer', table_id='test', + jskwargs={'use_local_files': True}) + ref = REFERENCE % dict( + lines=format_lines(t['a'], t['b']), + table_class='display compact', + table_id='test', + length='50', + display_length='10, 25, 50, 100, 500, 1000', + datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'), + datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'), + jquery_url='file://' + join(EXTERN_DIR, 'js', 'jquery-3.1.1.min.js') + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +@pytest.mark.skipif('not HAS_IPYTHON') +def test_show_in_notebook(): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['b', 'c', 'a', 'd', 'e'] + + htmlstr_windx = t.show_in_notebook().data # should default to 'idx' + htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data + htmlstr_woindx = t.show_in_notebook(show_row_index=False).data + + assert (textwrap.dedent(""" + idxab + 01b + 12c + 23a + 34d + 45e + """).strip() in htmlstr_windx) + + assert 'realidxab' in htmlstr_windx_named + + assert 'ab' in htmlstr_woindx diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_masked.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_masked.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92a73d56ec7fde01055db492ff2347f4a68c8bde Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_masked.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pickle.pyc 
b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pickle.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5caa82e3d53a01a2d652956e0a34430316d172cf Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pickle.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pprint.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pprint.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e91e8226b100a0c900bf2c3a56e905914d0f5bd Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_pprint.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_row.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_row.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcbdadc369d43a4322882494be125460f675e2ad Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_row.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_subclass.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_subclass.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd3ca048945dad79aef1bcd0d59edae1863f0f9b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/table/tests/test_subclass.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..89efddebbe37d517ff525c9672af20117f46300d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.py @@ -0,0 +1,28 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This package contains utilities to run the astropy test suite, tools +for writing tests, and general tests that are not associated with a +particular package. +""" + +# NOTE: This is retained only for backwards compatibility. Affiliated packages +# should no longer import `disable_internet` from `astropy.tests`. It is now +# available from `pytest_remotedata`. However, this is not the recommended +# mechanism for controlling access to remote data in tests. Instead, packages +# should make use of decorators provided by the pytest_remotedata plugin: +# - `@pytest.mark.remote_data` for tests that require remote data access +# - `@pytest.mark.internet_off` for tests that should only run when remote data +# access is disabled. +# Remote data access for the test suite is controlled by the `--remote-data` +# command line flag. This is either passed to `pytest` directly or to the +# `setup.py test` command. +# +# TODO: This import should eventually be removed once backwards compatibility +# is no longer supported. 
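# [Editor's aside -- illustration only, not part of the vendored __init__.py
# hunk above.] The note above recommends the pytest_remotedata markers rather
# than importing disable_internet from astropy.tests.  A minimal sketch of that
# recommended usage; the test names and bodies are hypothetical:
import pytest

@pytest.mark.remote_data
def test_needs_network():
    pass  # skipped unless the suite is run with --remote-data

@pytest.mark.internet_off
def test_offline_behaviour():
    pass  # runs only when remote data access is disabled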
+ +from pkgutil import find_loader + +if find_loader('pytest_remotedata') is not None: + from pytest_remotedata import disable_internet +else: + from ..extern.plugins.pytest_remotedata import disable_internet diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..593b6adbb46219c2c3441cc4700f31a998a8b9b1 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.py new file mode 100644 index 0000000000000000000000000000000000000000..0184777d183ccc72e544466c0574c6661a776bec --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.py @@ -0,0 +1,332 @@ +""" +Implements the wrapper for the Astropy test runner in the form of the +``./setup.py test`` distutils command. +""" + + +import os +import shutil +import subprocess +import sys +import tempfile + +from setuptools import Command + +from ..extern import six + + +def _fix_user_options(options): + """ + This is for Python 2.x and 3.x compatibility. distutils expects Command + options to all be byte strings on Python 2 and Unicode strings on Python 3. + """ + + def to_str_or_none(x): + if x is None: + return None + return str(x) + + return [tuple(to_str_or_none(x) for x in y) for y in options] + + +class FixRemoteDataOption(type): + """ + This metaclass is used to catch cases where the user is running the tests + with --remote-data. We've now changed the --remote-data option so that it + takes arguments, but we still want --remote-data to work as before and to + enable all remote tests. With this metaclass, we can modify sys.argv + before distutils/setuptools try to parse the command-line options. + """ + def __init__(cls, name, bases, dct): + + try: + idx = sys.argv.index('--remote-data') + except ValueError: + pass + else: + sys.argv[idx] = '--remote-data=any' + + try: + idx = sys.argv.index('-R') + except ValueError: + pass + else: + sys.argv[idx] = '-R=any' + + return super(FixRemoteDataOption, cls).__init__(name, bases, dct) + + +@six.add_metaclass(FixRemoteDataOption) +class AstropyTest(Command, object): + description = 'Run the tests for this package' + + user_options = [ + ('package=', 'P', + "The name of a specific package to test, e.g. 'io.fits' or 'utils'. " + "If nothing is specified, all default tests are run."), + ('test-path=', 't', + 'Specify a test location by path. If a relative path to a .py file, ' + 'it is relative to the built package, so e.g., a leading "astropy/" ' + 'is necessary. If a relative path to a .rst file, it is relative to ' + 'the directory *below* the --docs-path directory, so a leading ' + '"docs/" is usually necessary. May also be an absolute path.'), + ('verbose-results', 'V', + 'Turn on verbose output from pytest.'), + ('plugins=', 'p', + 'Plugins to enable when running pytest.'), + ('pastebin=', 'b', + "Enable pytest pastebin output. Either 'all' or 'failed'."), + ('args=', 'a', + 'Additional arguments to be passed to pytest.'), + ('remote-data=', 'R', 'Run tests that download remote data. Should be ' + 'one of none/astropy/any (defaults to none).'), + ('pep8', '8', + 'Enable PEP8 checking and disable regular tests. 
' + 'Requires the pytest-pep8 plugin.'), + ('pdb', 'd', + 'Start the interactive Python debugger on errors.'), + ('coverage', 'c', + 'Create a coverage report. Requires the coverage package.'), + ('open-files', 'o', 'Fail if any tests leave files open. Requires the ' + 'psutil package.'), + ('parallel=', 'j', + 'Run the tests in parallel on the specified number of ' + 'CPUs. If "auto", all the cores on the machine will be ' + 'used. Requires the pytest-xdist plugin.'), + ('docs-path=', None, + 'The path to the documentation .rst files. If not provided, and ' + 'the current directory contains a directory called "docs", that ' + 'will be used.'), + ('skip-docs', None, + "Don't test the documentation .rst files."), + ('repeat=', None, + 'How many times to repeat each test (can be used to check for ' + 'sporadic failures).'), + ('temp-root=', None, + 'The root directory in which to create the temporary testing files. ' + 'If unspecified the system default is used (e.g. /tmp) as explained ' + 'in the documentation for tempfile.mkstemp.') + ] + + user_options = _fix_user_options(user_options) + + package_name = '' + + def initialize_options(self): + self.package = None + self.test_path = None + self.verbose_results = False + self.plugins = None + self.pastebin = None + self.args = None + self.remote_data = 'none' + self.pep8 = False + self.pdb = False + self.coverage = False + self.open_files = False + self.parallel = 0 + self.docs_path = None + self.skip_docs = False + self.repeat = None + self.temp_root = None + + def finalize_options(self): + # Normally we would validate the options here, but that's handled in + # run_tests + pass + + def generate_testing_command(self): + """ + Build a Python script to run the tests. + """ + + cmd_pre = '' # Commands to run before the test function + cmd_post = '' # Commands to run after the test function + + if self.coverage: + pre, post = self._generate_coverage_commands() + cmd_pre += pre + cmd_post += post + + if six.PY2: + set_flag = "import __builtin__; __builtin__._ASTROPY_TEST_ = True" + else: + set_flag = "import builtins; builtins._ASTROPY_TEST_ = True" + + cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = (' + '{1.package_name}.test(' + 'package={1.package!r}, ' + 'test_path={1.test_path!r}, ' + 'args={1.args!r}, ' + 'plugins={1.plugins!r}, ' + 'verbose={1.verbose_results!r}, ' + 'pastebin={1.pastebin!r}, ' + 'remote_data={1.remote_data!r}, ' + 'pep8={1.pep8!r}, ' + 'pdb={1.pdb!r}, ' + 'open_files={1.open_files!r}, ' + 'parallel={1.parallel!r}, ' + 'docs_path={1.docs_path!r}, ' + 'skip_docs={1.skip_docs!r}, ' + 'repeat={1.repeat!r})); ' + '{cmd_post}' + 'sys.exit(result)') + return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post) + + def run(self): + """ + Run the tests! + """ + # Install the runtime and test dependencies. + if self.distribution.install_requires: + self.distribution.fetch_build_eggs( + self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs(self.distribution.tests_require) + + # Ensure there is a doc path + if self.docs_path is None: + cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None) + + # Some affiliated packages use this. 
+ # See astropy/package-template#157 + if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]): + self.docs_path = os.path.abspath(cfg_docs_dir[1]) + + # fall back on a default path of "docs" + elif os.path.exists('docs'): # pragma: no cover + self.docs_path = os.path.abspath('docs') + + # Build a testing install of the package + self._build_temp_install() + + # Run everything in a try: finally: so that the tmp dir gets deleted. + try: + # Construct this modules testing command + cmd = self.generate_testing_command() + + # Run the tests in a subprocess--this is necessary since + # new extension modules may have appeared, and this is the + # easiest way to set up a new environment + + # On Python 3.x prior to 3.3, the creation of .pyc files + # is not atomic. py.test jumps through some hoops to make + # this work by parsing import statements and carefully + # importing files atomically. However, it can't detect + # when __import__ is used, so its carefulness still fails. + # The solution here (admittedly a bit of a hack), is to + # turn off the generation of .pyc files altogether by + # passing the `-B` switch to `python`. This does mean + # that each core will have to compile .py file to bytecode + # itself, rather than getting lucky and borrowing the work + # already done by another core. Compilation is an + # insignificant fraction of total testing time, though, so + # it's probably not worth worrying about. + testproc = subprocess.Popen( + [sys.executable, '-B', '-c', cmd], + cwd=self.testing_path, close_fds=False) + retcode = testproc.wait() + except KeyboardInterrupt: + import signal + # If a keyboard interrupt is handled, pass it to the test + # subprocess to prompt pytest to initiate its teardown + testproc.send_signal(signal.SIGINT) + retcode = testproc.wait() + finally: + # Remove temporary directory + shutil.rmtree(self.tmp_dir) + + raise SystemExit(retcode) + + def _build_temp_install(self): + """ + Install the package and to a temporary directory for the purposes of + testing. This allows us to test the install command, include the + entry points, and also avoids creating pyc and __pycache__ directories + inside the build directory + """ + + # On OSX the default path for temp files is under /var, but in most + # cases on OSX /var is actually a symlink to /private/var; ensure we + # dereference that link, because py.test is very sensitive to relative + # paths... + + tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-', + dir=self.temp_root) + self.tmp_dir = os.path.realpath(tmp_dir) + + # We now install the package to the temporary directory. We do this + # rather than build and copy because this will ensure that e.g. entry + # points work. + self.reinitialize_command('install') + install_cmd = self.distribution.get_command_obj('install') + install_cmd.prefix = self.tmp_dir + self.run_command('install') + + # We now get the path to the site-packages directory that was created + # inside self.tmp_dir + install_cmd = self.get_finalized_command('install') + self.testing_path = install_cmd.install_lib + + # Ideally, docs_path is set properly in run(), but if it is still + # not set here, do not pretend it is, otherwise bad things happen. 
+ # See astropy/package-template#157 + if self.docs_path is not None: + new_docs_path = os.path.join(self.testing_path, + os.path.basename(self.docs_path)) + shutil.copytree(self.docs_path, new_docs_path) + self.docs_path = new_docs_path + + shutil.copy('setup.cfg', self.testing_path) + + def _generate_coverage_commands(self): + """ + This method creates the post and pre commands if coverage is to be + generated + """ + if self.parallel != 0: + raise ValueError( + "--coverage can not be used with --parallel") + + try: + import coverage # pylint: disable=W0611 + except ImportError: + raise ImportError( + "--coverage requires that the coverage package is " + "installed.") + + # Don't use get_pkg_data_filename here, because it + # requires importing astropy.config and thus screwing + # up coverage results for those packages. + coveragerc = os.path.join( + self.testing_path, self.package_name.replace('.', '/'), + 'tests', 'coveragerc') + + # We create a coveragerc that is specific to the version + # of Python we're running, so that we can mark branches + # as being specifically for Python 2 or Python 3 + with open(coveragerc, 'r') as fd: + coveragerc_content = fd.read() + if not six.PY2: + ignore_python_version = '2' + else: + ignore_python_version = '3' + coveragerc_content = coveragerc_content.replace( + "{ignore_python_version}", ignore_python_version).replace( + "{packagename}", self.package_name.replace('.', '/')) + tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc') + with open(tmp_coveragerc, 'wb') as tmp: + tmp.write(coveragerc_content.encode('utf-8')) + + cmd_pre = ( + 'import coverage; ' + 'cov = coverage.coverage(data_file=r"{0}", config_file=r"{1}"); ' + 'cov.start();'.format( + os.path.abspath(".coverage"), os.path.abspath(tmp_coveragerc))) + cmd_post = ( + 'cov.stop(); ' + 'from astropy.tests.helper import _save_coverage; ' + '_save_coverage(cov, result, r"{0}", r"{1}");'.format( + os.path.abspath('.'), os.path.abspath(self.testing_path))) + + return cmd_pre, cmd_post diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bffc950feaeed82f59128ba04255cbf91ac6cb53 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/command.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/coveragerc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/coveragerc new file mode 100644 index 0000000000000000000000000000000000000000..7e77ef1a86eda77b9782474c763956a1889bb7ca --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/coveragerc @@ -0,0 +1,33 @@ +[run] +source = astropy +omit = + astropy/__init__* + astropy/conftest.py + astropy/*setup* + astropy/*/tests/* + astropy/tests/test_* + astropy/extern/* + astropy/sphinx/* + astropy/utils/compat/* + astropy/version* + astropy/wcs/docstrings* + astropy/_erfa/* + +[report] +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about packages we have installed + except ImportError + + # Don't complain if tests don't hit assertions + raise AssertionError + raise NotImplementedError + + # Don't complain about script hooks + def main\(.*\): + + # Ignore branches that don't pertain to this version of Python + pragma: py{ignore_python_version} + six.PY{ignore_python_version} \ No newline 
at end of file diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd0b924eae7e133fb90d61e3cc7aa0dc0e10b53 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.py @@ -0,0 +1,153 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import contextlib +import socket + +from ..extern.six.moves import urllib + +# save original socket method for restoration +# These are global so that re-calling the turn_off_internet function doesn't +# overwrite them again +socket_original = socket.socket +socket_create_connection = socket.create_connection +socket_bind = socket.socket.bind +socket_connect = socket.socket.connect + + +INTERNET_OFF = False + +# urllib2 uses a global variable to cache its default "opener" for opening +# connections for various protocols; we store it off here so we can restore to +# the default after re-enabling internet use +_orig_opener = None + + +# ::1 is apparently another valid name for localhost? +# it is returned by getaddrinfo when that function is given localhost + +def check_internet_off(original_function, allow_astropy_data=False): + """ + Wraps ``original_function``, which in most cases is assumed + to be a `socket.socket` method, to raise an `IOError` for any operations + on non-local AF_INET sockets. + """ + + def new_function(*args, **kwargs): + if isinstance(args[0], socket.socket): + if not args[0].family in (socket.AF_INET, socket.AF_INET6): + # Should be fine in all but some very obscure cases + # More to the point, we don't want to affect AF_UNIX + # sockets. + return original_function(*args, **kwargs) + host = args[1][0] + addr_arg = 1 + valid_hosts = ('localhost', '127.0.0.1', '::1') + else: + # The only other function this is used to wrap currently is + # socket.create_connection, which should be passed a 2-tuple, but + # we'll check just in case + if not (isinstance(args[0], tuple) and len(args[0]) == 2): + return original_function(*args, **kwargs) + + host = args[0][0] + addr_arg = 0 + valid_hosts = ('localhost', '127.0.0.1') + + if allow_astropy_data: + for valid_host in ('data.astropy.org', 'astropy.stsci.edu', 'www.astropy.org'): + valid_host_ip = socket.gethostbyname(valid_host) + valid_hosts += (valid_host, valid_host_ip) + + hostname = socket.gethostname() + fqdn = socket.getfqdn() + + if host in (hostname, fqdn): + host = 'localhost' + new_addr = (host, args[addr_arg][1]) + args = args[:addr_arg] + (new_addr,) + args[addr_arg + 1:] + + if any(h in host for h in valid_hosts): + return original_function(*args, **kwargs) + else: + raise IOError("An attempt was made to connect to the internet " + "by a test that was not marked `remote_data`. The " + "requested host was: {0}".format(host)) + return new_function + + +def turn_off_internet(verbose=False, allow_astropy_data=False): + """ + Disable internet access via python by preventing connections from being + created using the socket module. Presumably this could be worked around by + using some other means of accessing the internet, but all default python + modules (urllib, requests, etc.) use socket [citation needed]. 
+ """ + + global INTERNET_OFF + global _orig_opener + + if INTERNET_OFF: + return + + INTERNET_OFF = True + + __tracebackhide__ = True + if verbose: + print("Internet access disabled") + + # Update urllib2 to force it not to use any proxies + # Must use {} here (the default of None will kick off an automatic search + # for proxies) + _orig_opener = urllib.request.build_opener() + no_proxy_handler = urllib.request.ProxyHandler({}) + opener = urllib.request.build_opener(no_proxy_handler) + urllib.request.install_opener(opener) + + socket.create_connection = check_internet_off(socket_create_connection, allow_astropy_data=allow_astropy_data) + socket.socket.bind = check_internet_off(socket_bind, allow_astropy_data=allow_astropy_data) + socket.socket.connect = check_internet_off(socket_connect, allow_astropy_data=allow_astropy_data) + + return socket + + +def turn_on_internet(verbose=False): + """ + Restore internet access. Not used, but kept in case it is needed. + """ + + global INTERNET_OFF + global _orig_opener + + if not INTERNET_OFF: + return + + INTERNET_OFF = False + + if verbose: + print("Internet access enabled") + + urllib.request.install_opener(_orig_opener) + + socket.create_connection = socket_create_connection + socket.socket.bind = socket_bind + socket.socket.connect = socket_connect + return socket + + +@contextlib.contextmanager +def no_internet(verbose=False): + """Context manager to temporarily disable internet access (if not already + disabled). If it was already disabled before entering the context manager + (i.e. `turn_off_internet` was called previously) then this is a no-op and + leaves internet access disabled until a manual call to `turn_on_internet`. + """ + + already_disabled = INTERNET_OFF + + turn_off_internet(verbose=verbose) + try: + yield + finally: + if not already_disabled: + turn_on_internet(verbose=verbose) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18b2060f7f5cf650e61d7735ff7fa270c577376c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/disable_internet.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..f69cb5aefaf868e04ba33a77ceb329524a6f5742 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.py @@ -0,0 +1,556 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module provides the tools used to internally run the astropy test suite +from the installed astropy. It makes use of the `pytest` testing framework. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import functools +import os +import sys +import types +import warnings + +import pytest + +from ..extern import six +from ..extern.six.moves import cPickle as pickle + +try: + # Import pkg_resources to prevent it from issuing warnings upon being + # imported from within py.test. See + # https://github.com/astropy/astropy/pull/537 for a detailed explanation. 
+ import pkg_resources # pylint: disable=W0611 +except ImportError: + pass + +from ..utils.exceptions import (AstropyDeprecationWarning, + AstropyPendingDeprecationWarning) + + +# For backward-compatibility with affiliated packages +from .runner import TestRunner # pylint: disable=W0611 + +__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data', + 'treat_deprecations_as_exceptions', 'catch_warnings', + 'assert_follows_unicode_guidelines', 'quantity_allclose', + 'assert_quantity_allclose', 'check_pickling_recovery', + 'pickle_protocol', 'generic_recursive_equality_test'] + +# pytest marker to mark tests which get data from the web +remote_data = pytest.mark.remote_data + + +# This is for Python 2.x and 3.x compatibility. distutils expects +# options to all be byte strings on Python 2 and Unicode strings on +# Python 3. +def _fix_user_options(options): + def to_str_or_none(x): + if x is None: + return None + return str(x) + + return [tuple(to_str_or_none(x) for x in y) for y in options] + + +def _save_coverage(cov, result, rootdir, testing_path): + """ + This method is called after the tests have been run in coverage mode + to cleanup and then save the coverage data and report. + """ + from ..utils.console import color_print + + if result != 0: + return + + # The coverage report includes the full path to the temporary + # directory, so we replace all the paths with the true source + # path. Note that this will not work properly for packages that still + # rely on 2to3. + try: + # Coverage 4.0: _harvest_data has been renamed to get_data, the + # lines dict is private + cov.get_data() + except AttributeError: + # Coverage < 4.0 + cov._harvest_data() + lines = cov.data.lines + else: + lines = cov.data._lines + + for key in list(lines.keys()): + new_path = os.path.relpath( + os.path.realpath(key), + os.path.realpath(testing_path)) + new_path = os.path.abspath( + os.path.join(rootdir, new_path)) + lines[new_path] = lines.pop(key) + + color_print('Saving coverage data in .coverage...', 'green') + cov.save() + + color_print('Saving HTML coverage report in htmlcov...', 'green') + cov.html_report(directory=os.path.join(rootdir, 'htmlcov')) + + +class raises(object): + """ + A decorator to mark that a test should raise a given exception. + Use as follows:: + + @raises(ZeroDivisionError) + def test_foo(): + x = 1/0 + + This can also be used a context manager, in which case it is just + an alias for the ``pytest.raises`` context manager (because the + two have the same name this help avoid confusion by being + flexible). 
+ """ + + # pep-8 naming exception -- this is a decorator class + def __init__(self, exc): + self._exc = exc + self._ctx = None + + def __call__(self, func): + @functools.wraps(func) + def run_raises_test(*args, **kwargs): + pytest.raises(self._exc, func, *args, **kwargs) + return run_raises_test + + def __enter__(self): + self._ctx = pytest.raises(self._exc) + return self._ctx.__enter__() + + def __exit__(self, *exc_info): + return self._ctx.__exit__(*exc_info) + + +_deprecations_as_exceptions = False +_include_astropy_deprecations = True +_modules_to_ignore_on_import = set([ + 'compiler', # A deprecated stdlib module used by py.test + 'scipy', + 'pygments', + 'ipykernel', + 'IPython', # deprecation warnings for async and await + 'setuptools']) +_warnings_to_ignore_entire_module = set([]) +_warnings_to_ignore_by_pyver = { + (2, 7): set([ + # Deprecation warnings ahead of pytest 4.x + r"MarkInfo objects are deprecated"]), + (3, 4): set([ + # py.test reads files with the 'U' flag, which is now + # deprecated in Python 3.4. + r"'U' mode is deprecated", + # BeautifulSoup4 triggers warning in stdlib's html module.x + r"The strict argument and mode are deprecated\.", + r"The value of convert_charrefs will become True in 3\.5\. " + r"You are encouraged to set the value explicitly\.", + # Deprecation warnings ahead of pytest 4.x + r"MarkInfo objects are deprecated"]), + (3, 5): set([ + # py.test reads files with the 'U' flag, which is + # deprecated. + r"'U' mode is deprecated", + # py.test raised this warning in inspect on Python 3.5. + # See https://github.com/pytest-dev/pytest/pull/1009 + # Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec() + r"inspect\.getargspec\(\) is deprecated, use " + r"inspect\.signature\(\) instead", + # https://github.com/astropy/astropy/pull/7372 + r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.", + # Deprecation warnings ahead of pytest 4.x + r"MarkInfo objects are deprecated"]), + (3, 6): set([ + # py.test reads files with the 'U' flag, which is + # deprecated. + r"'U' mode is deprecated", + # inspect raises this slightly different warning on Python 3.6-3.7. + # Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec() + r"inspect\.getargspec\(\) is deprecated, use " + r"inspect\.signature\(\) or inspect\.getfullargspec\(\)", + # https://github.com/astropy/astropy/pull/7372 + r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.", + # Deprecation warnings ahead of pytest 4.x + r"MarkInfo objects are deprecated"]), + (3, 7): set([ + # py.test reads files with the 'U' flag, which is + # deprecated. + r"'U' mode is deprecated", + # inspect raises this slightly different warning on Python 3.6-3.7. + # Keeping it since e.g. 
lxml as of 3.8.0 is still calling getargspec() + r"inspect\.getargspec\(\) is deprecated, use " + r"inspect\.signature\(\) or inspect\.getfullargspec\(\)", + # https://github.com/astropy/astropy/pull/7372 + r"Importing from numpy\.testing\.decorators is deprecated, import from numpy\.testing instead\.", + # Deprecation warnings ahead of pytest 4.x + r"MarkInfo objects are deprecated", + # Deprecation warning for collections.abc, fixed in Astropy master but + # still used in the LTS branch, in lxml, and maybe others + r"Using or importing the ABCs from 'collections'", + ]), +} + + +def enable_deprecations_as_exceptions(include_astropy_deprecations=True, + modules_to_ignore_on_import=[], + warnings_to_ignore_entire_module=[], + warnings_to_ignore_by_pyver={}): + """ + Turn on the feature that turns deprecations into exceptions. + + Parameters + ---------- + include_astropy_deprecations : bool + If set to `True`, ``AstropyDeprecationWarning`` and + ``AstropyPendingDeprecationWarning`` are also turned into exceptions. + + modules_to_ignore_on_import : list of str + List of additional modules that generate deprecation warnings + on import, which are to be ignored. By default, these are already + included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and + ``setuptools``. + + warnings_to_ignore_entire_module : list of str + List of modules with deprecation warnings to ignore completely, + not just during import. If ``include_astropy_deprecations=True`` + is given, ``AstropyDeprecationWarning`` and + ``AstropyPendingDeprecationWarning`` are also ignored for the modules. + + warnings_to_ignore_by_pyver : dict + Dictionary mapping tuple of ``(major, minor)`` Python version to + a list of deprecation warning messages to ignore. This is in + addition of those already ignored by default + (see ``_warnings_to_ignore_by_pyver`` values). + + """ + global _deprecations_as_exceptions + _deprecations_as_exceptions = True + + global _include_astropy_deprecations + _include_astropy_deprecations = include_astropy_deprecations + + global _modules_to_ignore_on_import + _modules_to_ignore_on_import.update(modules_to_ignore_on_import) + + global _warnings_to_ignore_entire_module + _warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module) + + global _warnings_to_ignore_by_pyver + for key, val in six.iteritems(warnings_to_ignore_by_pyver): + if key in _warnings_to_ignore_by_pyver: + _warnings_to_ignore_by_pyver[key].update(val) + else: + _warnings_to_ignore_by_pyver[key] = set(val) + + +def treat_deprecations_as_exceptions(): + """ + Turn all DeprecationWarnings (which indicate deprecated uses of + Python itself or Numpy, but not within Astropy, where we use our + own deprecation warning class) into exceptions so that we find + out about them early. + + This completely resets the warning filters and any "already seen" + warning state. + """ + # First, totally reset the warning state. The modules may change during + # this iteration thus we copy the original state to a list to iterate + # on. See https://github.com/astropy/astropy/pull/5513. + for module in list(six.itervalues(sys.modules)): + # We don't want to deal with six.MovedModules, only "real" + # modules. 
+ if (isinstance(module, types.ModuleType) and + hasattr(module, '__warningregistry__')): + del module.__warningregistry__ + + if not _deprecations_as_exceptions: + return + + warnings.resetwarnings() + + # Hide the next couple of DeprecationWarnings + warnings.simplefilter('ignore', DeprecationWarning) + # Here's the wrinkle: a couple of our third-party dependencies + # (py.test and scipy) are still using deprecated features + # themselves, and we'd like to ignore those. Fortunately, those + # show up only at import time, so if we import those things *now*, + # before we turn the warnings into exceptions, we're golden. + for m in _modules_to_ignore_on_import: + try: + __import__(m) + except ImportError: + pass + + # Now, start over again with the warning filters + warnings.resetwarnings() + # Now, turn DeprecationWarnings into exceptions + _all_warns = [DeprecationWarning] + + # Only turn astropy deprecation warnings into exceptions if requested + if _include_astropy_deprecations: + _all_warns += [AstropyDeprecationWarning, + AstropyPendingDeprecationWarning] + + for w in _all_warns: + warnings.filterwarnings("error", ".*", w) + + # This ignores all deprecation warnings from given module(s), + # not just on import, for use of Astropy affiliated packages. + for m in _warnings_to_ignore_entire_module: + for w in _all_warns: + warnings.filterwarnings('ignore', category=w, module=m) + + for v in _warnings_to_ignore_by_pyver: + if sys.version_info[:2] == v: + for s in _warnings_to_ignore_by_pyver[v]: + warnings.filterwarnings("ignore", s, DeprecationWarning) + + +class catch_warnings(warnings.catch_warnings): + """ + A high-powered version of warnings.catch_warnings to use for testing + and to make sure that there is no dependence on the order in which + the tests are run. + + This completely blitzes any memory of any warnings that have + appeared before so that all warnings will be caught and displayed. + + ``*args`` is a set of warning classes to collect. If no arguments are + provided, all warnings are collected. + + Use as follows:: + + with catch_warnings(MyCustomWarning) as w: + do.something.bad() + assert len(w) > 0 + """ + + def __init__(self, *classes): + super(catch_warnings, self).__init__(record=True) + self.classes = classes + + def __enter__(self): + warning_list = super(catch_warnings, self).__enter__() + treat_deprecations_as_exceptions() + if len(self.classes) == 0: + warnings.simplefilter('always') + else: + warnings.simplefilter('ignore') + for cls in self.classes: + warnings.simplefilter('always', cls) + return warning_list + + def __exit__(self, type, value, traceback): + treat_deprecations_as_exceptions() + + +class ignore_warnings(catch_warnings): + """ + This can be used either as a context manager or function decorator to + ignore all warnings that occur within a function or block of code. + + An optional category option can be supplied to only ignore warnings of a + certain category or categories (if a list is provided). 
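    A minimal usage sketch (both call styles are inferred from the
    ``__init__`` and ``__call__`` defined below; the decorated test and the
    ``call_legacy_api`` function are hypothetical)::

        # as a context manager, silencing a single category
        with ignore_warnings(DeprecationWarning):
            call_legacy_api()

        # as a decorator, silencing several categories at once
        @ignore_warnings(category=[DeprecationWarning, UserWarning])
        def test_legacy_code_path():
            call_legacy_api()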
+ """ + + def __init__(self, category=None): + super(ignore_warnings, self).__init__() + + if isinstance(category, type) and issubclass(category, Warning): + self.category = [category] + else: + self.category = category + + def __call__(self, func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + # Originally this just reused self, but that doesn't work if the + # function is called more than once so we need to make a new + # context manager instance for each call + with self.__class__(category=self.category): + return func(*args, **kwargs) + + return wrapper + + def __enter__(self): + retval = super(ignore_warnings, self).__enter__() + if self.category is not None: + for category in self.category: + warnings.simplefilter('ignore', category) + else: + warnings.simplefilter('ignore') + return retval + + +def assert_follows_unicode_guidelines( + x, roundtrip=None): + """ + Test that an object follows our Unicode policy. See + "Unicode guidelines" in the coding guidelines. + + Parameters + ---------- + x : object + The instance to test + + roundtrip : module, optional + When provided, this namespace will be used to evaluate + ``repr(x)`` and ensure that it roundtrips. It will also + ensure that ``__bytes__(x)`` and ``__unicode__(x)`` roundtrip. + If not provided, no roundtrip testing will be performed. + """ + from .. import conf + from ..extern import six + + with conf.set_temp('unicode_output', False): + bytes_x = bytes(x) + unicode_x = six.text_type(x) + repr_x = repr(x) + + assert isinstance(bytes_x, bytes) + bytes_x.decode('ascii') + assert isinstance(unicode_x, six.text_type) + unicode_x.encode('ascii') + assert isinstance(repr_x, six.string_types) + if isinstance(repr_x, bytes): + repr_x.decode('ascii') + else: + repr_x.encode('ascii') + + if roundtrip is not None: + assert x.__class__(bytes_x) == x + assert x.__class__(unicode_x) == x + assert eval(repr_x, roundtrip) == x + + with conf.set_temp('unicode_output', True): + bytes_x = bytes(x) + unicode_x = six.text_type(x) + repr_x = repr(x) + + assert isinstance(bytes_x, bytes) + bytes_x.decode('ascii') + assert isinstance(unicode_x, six.text_type) + assert isinstance(repr_x, six.string_types) + if isinstance(repr_x, bytes): + repr_x.decode('ascii') + else: + repr_x.encode('ascii') + + if roundtrip is not None: + assert x.__class__(bytes_x) == x + assert x.__class__(unicode_x) == x + assert eval(repr_x, roundtrip) == x + + +@pytest.fixture(params=[0, 1, -1]) +def pickle_protocol(request): + """ + Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). + (Originally from astropy.table.tests.test_pickle) + """ + return request.param + + +def generic_recursive_equality_test(a, b, class_history): + """ + Check if the attributes of a and b are equal. Then, + check if the attributes of the attributes are equal. 
+ """ + dict_a = a.__dict__ + dict_b = b.__dict__ + for key in dict_a: + assert key in dict_b,\ + "Did not pickle {0}".format(key) + if hasattr(dict_a[key], '__eq__'): + eq = (dict_a[key] == dict_b[key]) + if '__iter__' in dir(eq): + eq = (False not in eq) + assert eq, "Value of {0} changed by pickling".format(key) + + if hasattr(dict_a[key], '__dict__'): + if dict_a[key].__class__ in class_history: + # attempt to prevent infinite recursion + pass + else: + new_class_history = [dict_a[key].__class__] + new_class_history.extend(class_history) + generic_recursive_equality_test(dict_a[key], + dict_b[key], + new_class_history) + + +def check_pickling_recovery(original, protocol): + """ + Try to pickle an object. If successful, make sure + the object's attributes survived pickling and unpickling. + """ + f = pickle.dumps(original, protocol=protocol) + unpickled = pickle.loads(f) + class_history = [original.__class__] + generic_recursive_equality_test(original, unpickled, + class_history) + + +def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, + **kwargs): + """ + Raise an assertion if two objects are not equal up to desired tolerance. + + This is a :class:`~astropy.units.Quantity`-aware version of + :func:`numpy.testing.assert_allclose`. + """ + import numpy as np + np.testing.assert_allclose(*_unquantify_allclose_arguments(actual, desired, + rtol, atol), + **kwargs) + + +def quantity_allclose(a, b, rtol=1.e-5, atol=None, **kwargs): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This is a :class:`~astropy.units.Quantity`-aware version of + :func:`numpy.allclose`. + """ + import numpy as np + return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol), + **kwargs) + + +def _unquantify_allclose_arguments(actual, desired, rtol, atol): + from .. 
import units as u + + actual = u.Quantity(actual, subok=True, copy=False) + + desired = u.Quantity(desired, subok=True, copy=False) + try: + desired = desired.to(actual.unit) + except u.UnitsError: + raise u.UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) " + "are not convertible" + .format(desired.unit, actual.unit)) + + if atol is None: + # by default, we assume an absolute tolerance of 0 + atol = u.Quantity(0) + else: + atol = u.Quantity(atol, subok=True, copy=False) + try: + atol = atol.to(actual.unit) + except u.UnitsError: + raise u.UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) " + "are not convertible" + .format(atol.unit, actual.unit)) + + rtol = u.Quantity(rtol, subok=True, copy=False) + try: + rtol = rtol.to(u.dimensionless_unscaled) + except Exception: + raise u.UnitsError("`rtol` should be dimensionless") + + return actual.value, desired.value, rtol.value, atol.value diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbf6b40f9c531b678659a63f6e1ba14873522027 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/helper.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..c2f90f2e0de5d5409afc78de0c6f229c8d9025be --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.py @@ -0,0 +1,21 @@ +import matplotlib +from matplotlib import pyplot as plt + +from ..utils.decorators import wraps + +MPL_VERSION = matplotlib.__version__ + +ROOT = "http://{server}/testing/astropy/2018-02-01T23:31:45.013149/{mpl_version}/" + +IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org', mpl_version=MPL_VERSION[:3] + '.x') + ',' + + ROOT.format(server='www.astropy.org/astropy-data', mpl_version=MPL_VERSION[:3] + '.x')) + + +def ignore_matplotlibrc(func): + # This is a decorator for tests that use matplotlib but not pytest-mpl + # (which already handles rcParams) + @wraps(func) + def wrapper(*args, **kwargs): + with plt.style.context({}, after_reset=True): + return func(*args, **kwargs) + return wrapper diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46e6ebd6a871f3201f6fcf467b80e06935306e1c Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/image_tests.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..238a522e193e3302eb9fc2f5adb44946efcc9d91 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.py @@ -0,0 +1,186 @@ +""" +Implements a replacement for `doctest.OutputChecker` that handles certain +normalizations of Python expression output. See the docstring on +`AstropyOutputChecker` for more details. 
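Stepping back to the ``Quantity``-aware comparison helpers completed just above (``assert_quantity_allclose`` and ``quantity_allclose``): a short sketch of how they are typically called; the values and units are illustrative only:

    import astropy.units as u
    from astropy.tests.helper import assert_quantity_allclose, quantity_allclose

    # 'desired' and 'atol' are converted to the units of 'actual' before the
    # underlying numpy comparison runs, so mixed but convertible units are fine.
    assert_quantity_allclose(1001 * u.m, 1 * u.km, atol=2 * u.m)
    assert quantity_allclose(1.0 * u.km, 1000.0 * u.m, rtol=1e-6)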
+""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import doctest +import re + +import numpy as np + +from ..extern import six +from ..extern.six.moves import zip + +# Much of this code, particularly the parts of floating point handling, is +# borrowed from the SymPy project with permission. See licenses/SYMPY.rst +# for the full SymPy license. + +FIX = doctest.register_optionflag('FIX') +FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP') +IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT') +IGNORE_OUTPUT_2 = doctest.register_optionflag('IGNORE_OUTPUT_2') +IGNORE_OUTPUT_3 = doctest.register_optionflag('IGNORE_OUTPUT_3') + + +class AstropyOutputChecker(doctest.OutputChecker): + """ + - Removes u'' prefixes on string literals + - Ignores the 'L' suffix on long integers + - In Numpy dtype strings, removes the leading pipe, i.e. '|S9' -> + 'S9'. Numpy 1.7 no longer includes it in display. + - Supports the FLOAT_CMP flag, which parses floating point values + out of the output and compares their numerical values rather than their + string representation. This naturally supports complex numbers as well + (simply by comparing their real and imaginary parts separately). + """ + + _original_output_checker = doctest.OutputChecker + + _str_literal_re = re.compile( + r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _byteorder_re = re.compile( + r"([\'\"])[|<>]([biufcSaUV][0-9]+)([\'\"])", re.UNICODE) + _fix_32bit_re = re.compile( + r"([\'\"])([iu])[48]([\'\"])", re.UNICODE) + _long_int_re = re.compile( + r"([0-9]+)L", re.UNICODE) + + def __init__(self): + # NOTE OutputChecker is an old-style class with no __init__ method, + # so we can't call the base class version of __init__ here + + exp = r'(?:e[+-]?\d+)' + + got_floats = (r'\s*([+-]?\d+\.\d*{0}?|' + r'[+-]?\.\d+{0}?|' + r'[+-]?\d+{0}|' + r'nan|' + r'[+-]?inf)').format(exp) + + # floats in the 'want' string may contain ellipses + want_floats = got_floats + r'(\.{3})?' + + front_sep = r'\s|[*+-,<=(\[]' + back_sep = front_sep + r'|[>j)\]]' + + fbeg = r'^{}(?={}|$)'.format(got_floats, back_sep) + fmidend = r'(?<={}){}(?={}|$)'.format(front_sep, got_floats, back_sep) + self.num_got_rgx = re.compile(r'({}|{})'.format(fbeg, fmidend)) + + fbeg = r'^{}(?={}|$)'.format(want_floats, back_sep) + fmidend = r'(?<={}){}(?={}|$)'.format(front_sep, want_floats, back_sep) + self.num_want_rgx = re.compile(r'({}|{})'.format(fbeg, fmidend)) + + def do_fixes(self, want, got): + want = re.sub(self._str_literal_re, r'\1\2', want) + want = re.sub(self._byteorder_re, r'\1\2\3', want) + want = re.sub(self._fix_32bit_re, r'\1\2\3', want) + want = re.sub(self._long_int_re, r'\1', want) + + got = re.sub(self._str_literal_re, r'\1\2', got) + got = re.sub(self._byteorder_re, r'\1\2\3', got) + got = re.sub(self._fix_32bit_re, r'\1\2\3', got) + got = re.sub(self._long_int_re, r'\1', got) + + return want, got + + def normalize_floats(self, want, got, flags): + """ + Alternative to the built-in check_output that also handles parsing + float values and comparing their numeric values rather than their + string representations. + + This requires rewriting enough of the basic check_output that, when + FLOAT_CMP is enabled, it totally takes over for check_output. + """ + + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # TODO parse integers as well ? + # Parse floats and compare them. 
If some of the parsed floats contain + # ellipses, skip the comparison. + matches = self.num_got_rgx.finditer(got) + numbers_got = [match.group(1) for match in matches] # list of strs + matches = self.num_want_rgx.finditer(want) + numbers_want = [match.group(1) for match in matches] # list of strs + if len(numbers_got) != len(numbers_want): + return False + if len(numbers_got) > 0: + nw_ = [] + for ng, nw in zip(numbers_got, numbers_want): + if '...' in nw: + nw_.append(ng) + continue + else: + nw_.append(nw) + + if not np.allclose(float(ng), float(nw), equal_nan=True): + return False + + # replace all floats in the "got" string by those from "wanted". + # TODO: can this be done more elegantly? Used to replace all with + # '{}' and then format, but this is problematic if the string + # contains other curly braces (e.g., from a dict). + got = self.num_got_rgx.sub(lambda x: nw_.pop(0), got) + + # can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. + if not (flags & doctest.DONT_ACCEPT_BLANKLINE): + # Replace in want with a blank line. + want = re.sub(r'(?m)^{}\s*?$'.format(re.escape(doctest.BLANKLINE_MARKER)), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. + got = re.sub(r'(?m)^\s*?$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if flags & doctest.NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if flags & doctest.ELLIPSIS: + if doctest._ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + def check_output(self, want, got, flags): + if (flags & IGNORE_OUTPUT or (six.PY2 and flags & IGNORE_OUTPUT_2) or + (not six.PY2 and flags & IGNORE_OUTPUT_3)): + return True + + if flags & FIX: + want, got = self.do_fixes(want, got) + + if flags & FLOAT_CMP: + return self.normalize_floats(want, got, flags) + + # Can't use super here because doctest.OutputChecker is not a + # new-style class. + return self._original_output_checker.check_output( + self, want, got, flags) + + def output_difference(self, want, got, flags): + if flags & FIX: + want, got = self.do_fixes(want, got) + + # Can't use super here because doctest.OutputChecker is not a + # new-style class. 
+ return self._original_output_checker.output_difference( + self, want, got, flags) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.pyc new file mode 100644 index 0000000000000000000000000000000000000000..affbad3859d97116fb234985d6d30d41d11be7bf Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/output_checker.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc780165a64de13e0eac4b07b0f3fba00abf86a --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.py @@ -0,0 +1,386 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +These plugins modify the behavior of py.test and are meant to be imported +into conftest.py in the root directory. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import __future__ + +from ..extern import six +from ..extern.six.moves import builtins + +import ast +import datetime +import io +import locale +import math +import os +import re +import sys +import types +from pkgutil import find_loader +from collections import OrderedDict + +import pytest + +from ..config.paths import set_temp_config, set_temp_cache +from .helper import treat_deprecations_as_exceptions, ignore_warnings +from .helper import enable_deprecations_as_exceptions # pylint: disable=W0611 +from ..utils.argparse import writeable_directory +from ..utils.introspection import resolve_name + +try: + import importlib.machinery as importlib_machinery +except ImportError: # Python 2.7 + importlib_machinery = None + +pytest_plugins = ['astropy.tests.pytest_repeat'] + +_PLUGINS_PREFIX = 'astropy.extern.plugins' +for plugin in ['pytest_doctestplus', 'pytest_openfiles', 'pytest_remotedata']: + if find_loader(plugin) is None: + pytest_plugins.append('{}.{}.plugin'.format(_PLUGINS_PREFIX, plugin)) + +# these pytest hooks allow us to mark tests and run the marked tests with +# specific command line options. + +# This makes sure that this module is not collected when running the test +# suite. 
This is necessary in order to get the test suite to run without errors +# using pytest>=3.7 +if getattr(builtins, '_pytest_running', False): + pytest.skip() + + +def pytest_addoption(parser): + + parser.addoption("--astropy-config-dir", nargs='?', type=writeable_directory, + help="specify directory for storing and retrieving the " + "Astropy configuration during tests (default is " + "to use a temporary directory created by the test " + "runner); be aware that using an Astropy config " + "file other than the default can cause some tests " + "to fail unexpectedly") + + parser.addoption("--astropy-cache-dir", nargs='?', type=writeable_directory, + help="specify directory for storing and retrieving the " + "Astropy cache during tests (default is " + "to use a temporary directory created by the test " + "runner)") + parser.addini("astropy_config_dir", + "specify directory for storing and retrieving the " + "Astropy configuration during tests (default is " + "to use a temporary directory created by the test " + "runner); be aware that using an Astropy config " + "file other than the default can cause some tests " + "to fail unexpectedly", default=None) + + parser.addini("astropy_cache_dir", + "specify directory for storing and retrieving the " + "Astropy cache during tests (default is " + "to use a temporary directory created by the test " + "runner)", default=None) + + +def pytest_configure(config): + treat_deprecations_as_exceptions() + +def pytest_runtest_setup(item): + config_dir = item.config.getini('astropy_config_dir') + cache_dir = item.config.getini('astropy_cache_dir') + + # Command-line options can override, however + config_dir = item.config.getoption('astropy_config_dir') or config_dir + cache_dir = item.config.getoption('astropy_cache_dir') or cache_dir + + # We can't really use context managers directly in py.test (although + # py.test 2.7 adds the capability), so this may look a bit hacky + if config_dir: + item.set_temp_config = set_temp_config(config_dir) + item.set_temp_config.__enter__() + if cache_dir: + item.set_temp_cache = set_temp_cache(cache_dir) + item.set_temp_cache.__enter__() + + + +def pytest_runtest_teardown(item, nextitem): + if hasattr(item, 'set_temp_cache'): + item.set_temp_cache.__exit__() + if hasattr(item, 'set_temp_config'): + item.set_temp_config.__exit__() + + +PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'), + ('Scipy', 'scipy'), + ('Matplotlib', 'matplotlib'), + ('h5py', 'h5py'), + ('Pandas', 'pandas')]) + +# This always returns with Astropy's version +from .. 
import __version__ + +TESTED_VERSIONS = OrderedDict([('Astropy', __version__)]) + + +def pytest_report_header(config): + + try: + stdoutencoding = sys.stdout.encoding or 'ascii' + except AttributeError: + stdoutencoding = 'ascii' + + if six.PY2: + args = [x.decode('utf-8') for x in config.args] + else: + args = config.args + + # TESTED_VERSIONS can contain the affiliated package version, too + if len(TESTED_VERSIONS) > 1: + for pkg, version in TESTED_VERSIONS.items(): + if pkg != 'Astropy': + s = "\nRunning tests with {0} version {1}.\n".format( + pkg, version) + else: + s = "\nRunning tests with Astropy version {0}.\n".format( + TESTED_VERSIONS['Astropy']) + + # Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from + # each directory argument + if hasattr(config, 'rootdir'): + rootdir = str(config.rootdir) + if not rootdir.endswith(os.sep): + rootdir += os.sep + + dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg + for arg in args] + else: + dirs = args + + s += "Running tests in {0}.\n\n".format(" ".join(dirs)) + + s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19]) + + from platform import platform + plat = platform() + if isinstance(plat, bytes): + plat = plat.decode(stdoutencoding, 'replace') + s += "Platform: {0}\n\n".format(plat) + s += "Executable: {0}\n\n".format(sys.executable) + s += "Full Python Version: \n{0}\n\n".format(sys.version) + + s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format( + sys.getdefaultencoding(), + locale.getpreferredencoding(), + sys.getfilesystemencoding()) + if sys.version_info < (3, 3, 0): + s += ", unicode bits: {0}".format( + int(math.log(sys.maxunicode, 2))) + s += '\n' + + s += "byteorder: {0}\n".format(sys.byteorder) + s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format( + sys.float_info) + + for module_display, module_name in six.iteritems(PYTEST_HEADER_MODULES): + try: + with ignore_warnings(DeprecationWarning): + module = resolve_name(module_name) + except ImportError: + s += "{0}: not available\n".format(module_display) + else: + try: + version = module.__version__ + except AttributeError: + version = 'unknown (no __version__ attribute)' + s += "{0}: {1}\n".format(module_display, version) + + # Helpers version + try: + from ..version import astropy_helpers_version + except ImportError: + pass + else: + s += "astropy_helpers: {0}\n".format(astropy_helpers_version) + + special_opts = ["remote_data", "pep8"] + opts = [] + for op in special_opts: + op_value = getattr(config.option, op, None) + if op_value: + if isinstance(op_value, six.string_types): + op = ': '.join((op, op_value)) + opts.append(op) + if opts: + s += "Using Astropy options: {0}.\n".format(", ".join(opts)) + + if six.PY2: + s = s.encode(stdoutencoding, 'replace') + + return s + + +def pytest_pycollect_makemodule(path, parent): + # This is where we set up testing both with and without + # from __future__ import unicode_literals + + # On Python 3, just do the regular thing that py.test does + if six.PY2: + return Pair(path, parent) + else: + return pytest.Module(path, parent) + + +class Pair(pytest.File): + """ + This class treats a given test .py file as a pair of .py files + where one has __future__ unicode_literals and the other does not. + """ + + def collect(self): + # First, just do the regular import of the module to make + # sure it's sane and valid. 
This block is copied directly + # from py.test + try: + mod = self.fspath.pyimport(ensuresyspath=True) + except SyntaxError: + import py + excinfo = py.code.ExceptionInfo() + raise self.CollectError(excinfo.getrepr(style="short")) + except self.fspath.ImportMismatchError: + e = sys.exc_info()[1] + raise self.CollectError( + "import file mismatch:\n" + "imported module {!r} has this __file__ attribute:\n" + " {}\n" + "which is not the same as the test file we want to collect:\n" + " {}\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules".format(e.args)) + + # Now get the file's content. + with io.open(six.text_type(self.fspath), 'rb') as fd: + content = fd.read() + + # If the file contains the special marker, only test it both ways. + if b'TEST_UNICODE_LITERALS' in content: + # Return the file in both unicode_literal-enabled and disabled forms + return [ + UnicodeLiteralsModule(mod.__name__, content, self.fspath, self), + NoUnicodeLiteralsModule(mod.__name__, content, self.fspath, self) + ] + else: + return [pytest.Module(self.fspath, self)] + + +_RE_FUTURE_IMPORTS = re.compile(br'from __future__ import ((\(.*?\))|([^\n]+))', + flags=re.DOTALL) + + +class ModifiedModule(pytest.Module): + def __init__(self, mod_name, content, path, parent): + self.mod_name = mod_name + self.content = content + super(ModifiedModule, self).__init__(path, parent) + + def _importtestmodule(self): + # We have to remove the __future__ statements *before* parsing + # with compile, otherwise the flags are ignored. + content = re.sub(_RE_FUTURE_IMPORTS, b'\n', self.content) + + new_mod = types.ModuleType(self.mod_name) + new_mod.__file__ = six.text_type(self.fspath) + + if hasattr(self, '_transform_ast'): + # ast.parse doesn't let us hand-select the __future__ + # statements, but built-in compile, with the PyCF_ONLY_AST + # flag does. + tree = compile( + content, six.text_type(self.fspath), 'exec', + self.flags | ast.PyCF_ONLY_AST, True) + tree = self._transform_ast(tree) + # Now that we've transformed the tree, recompile it + code = compile( + tree, six.text_type(self.fspath), 'exec') + else: + # If we don't need to transform the AST, we can skip + # parsing/compiling in two steps + code = compile( + content, six.text_type(self.fspath), 'exec', + self.flags, True) + + pwd = os.getcwd() + try: + os.chdir(os.path.dirname(six.text_type(self.fspath))) + six.exec_(code, new_mod.__dict__) + finally: + os.chdir(pwd) + self.config.pluginmanager.consider_module(new_mod) + return new_mod + + +class UnicodeLiteralsModule(ModifiedModule): + flags = ( + __future__.absolute_import.compiler_flag | + __future__.division.compiler_flag | + __future__.print_function.compiler_flag | + __future__.unicode_literals.compiler_flag + ) + + +class NoUnicodeLiteralsModule(ModifiedModule): + flags = ( + __future__.absolute_import.compiler_flag | + __future__.division.compiler_flag | + __future__.print_function.compiler_flag + ) + + def _transform_ast(self, tree): + # When unicode_literals is disabled, we still need to convert any + # byte string containing non-ascii characters into a Unicode string. + # If it doesn't decode as utf-8, we assume it's some other kind + # of byte string and just ultimately leave it alone. + + # Note that once we drop support for Python 3.2, we should be + # able to remove this transformation and just put explicit u'' + # prefixes in the test source code. 
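        # A couple of illustrative cases (values assumed, not from the test
        # suite): b'caf\xc3\xa9' is not pure ASCII but decodes as UTF-8, so the
        # transformer below rewrites it to the unicode literal u'caf\xe9';
        # b'\xff\xfe' fails to decode as UTF-8 and is left untouched.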
+ + class NonAsciiLiteral(ast.NodeTransformer): + def visit_Str(self, node): + s = node.s + if isinstance(s, bytes): + try: + s.decode('ascii') + except UnicodeDecodeError: + try: + s = s.decode('utf-8') + except UnicodeDecodeError: + pass + else: + return ast.copy_location(ast.Str(s=s), node) + return node + return NonAsciiLiteral().visit(tree) + + +def pytest_terminal_summary(terminalreporter): + """Output a warning to IPython users in case any tests failed.""" + + try: + get_ipython() + except NameError: + return + + if not terminalreporter.stats.get('failed'): + # Only issue the warning when there are actually failures + return + + terminalreporter.ensure_newline() + terminalreporter.write_line( + 'Some tests are known to fail when run from the IPython prompt; ' + 'especially, but not limited to tests involving logging and warning ' + 'handling. Unless you are certain as to the cause of the failure, ' + 'please check that the failure occurs outside IPython as well. See ' + 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-' + 'tests-when-running-the-tests-in-ipython for more information.', + yellow=True, bold=True) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.pyc new file mode 100644 index 0000000000000000000000000000000000000000..796f4a4391d67dd9792ee6d3da49032bb8237370 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_plugins.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.py new file mode 100644 index 0000000000000000000000000000000000000000..5705a8f39cc10daec5d7aece769981bd535fe8f9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.py @@ -0,0 +1,27 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +These plugins modify the behavior of py.test and are meant to be imported +into conftest.py in the root directory. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern.six.moves import range + + +def pytest_addoption(parser): + + parser.addoption('--repeat', action='store', + help='Number of times to repeat each test') + + +def pytest_generate_tests(metafunc): + + # If the repeat option is set, we add a fixture for the repeat count and + # parametrize the tests over the repeats. 
Solution adapted from: + # http://stackoverflow.com/q/21764473/180783 + + if metafunc.config.option.repeat is not None: + count = int(metafunc.config.option.repeat) + metafunc.fixturenames.append('tmp_ct') + metafunc.parametrize('tmp_ct', range(count)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb99865cbfdb20cadf3de3ecde1952d7b03d8829 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/pytest_repeat.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1b9cd1cd2df1d7dc4d6c82076edc825c59d3f0 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.py @@ -0,0 +1,525 @@ +"""Implements the Astropy TestRunner which is a thin wrapper around py.test.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import inspect +import os +import copy +import shlex +import sys +import tempfile +import warnings +from collections import OrderedDict + +from ..config.paths import set_temp_config, set_temp_cache +from ..extern import six +from ..utils import wraps, find_current_module +from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning + +__all__ = ['TestRunner', 'TestRunnerBase', 'keyword'] + + +class keyword(object): + """ + A decorator to mark a method as keyword argument for the ``TestRunner``. + + Parameters + ---------- + default_value : `object` + The default value for the keyword argument. (Default: `None`) + + priority : `int` + keyword argument methods are executed in order of descending priority. + """ + + def __init__(self, default_value=None, priority=0): + self.default_value = default_value + self.priority = priority + + def __call__(self, f): + def keyword(*args, **kwargs): + return f(*args, **kwargs) + + keyword._default_value = self.default_value + keyword._priority = self.priority + # Set __doc__ explicitly here rather than using wraps because we want + # to keep the function name as keyword so we can inspect it later. + keyword.__doc__ = f.__doc__ + + return keyword + + +class TestRunnerBase(object): + """ + The base class for the TestRunner. + + A test runner can be constructed by creating a subclass of this class and + defining 'keyword' methods. These are methods that have the + `~astropy.tests.runner.keyword` decorator, these methods are used to + construct allowed keyword arguments to the + `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow + customization of individual keyword arguments (and associated logic) + without having to re-implement the whole + `~astropy.tests.runner.TestRunnerBase.run_tests` method. + + Examples + -------- + + A simple keyword method:: + + class MyRunner(TestRunnerBase): + + @keyword('default_value'): + def spam(self, spam, kwargs): + \"\"\" + spam : `str` + The parameter description for the run_tests docstring. + \"\"\" + # Return value must be a list with a CLI parameter for pytest. 
+ return ['--spam={}'.format(spam)] + """ + + def __init__(self, base_path): + self.base_path = os.path.abspath(base_path) + + def __new__(cls, *args, **kwargs): + # Before constructing the class parse all the methods that have been + # decorated with ``keyword``. + + # The objective of this method is to construct a default set of keyword + # arguments to the ``run_tests`` method. It does this by inspecting the + # methods of the class for functions with the name ``keyword`` which is + # the name of the decorator wrapping function. Once it has created this + # dictionary, it also formats the docstring of ``run_tests`` to be + # comprised of the docstrings for the ``keyword`` methods. + + # To add a keyword argument to the ``run_tests`` method, define a new + # method decorated with ``@keyword`` and with the ``self, name, kwargs`` + # signature. + # Get all 'function' members as the wrapped methods are functions + if six.PY2: + functions = inspect.getmembers(cls, predicate=inspect.ismethod) + else: + functions = inspect.getmembers(cls, predicate=inspect.isfunction) + + # Filter out anything that's not got the name 'keyword' + keywords = filter(lambda func: func[1].__name__ == 'keyword', functions) + # Sort all keywords based on the priority flag. + sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True) + + cls.keywords = OrderedDict() + doc_keywords = "" + for name, func in sorted_keywords: + # Here we test if the function has been overloaded to return + # NotImplemented which is the way to disable arguments on + # subclasses. If it has been disabled we need to remove it from the + # default keywords dict. We do it in the try except block because + # we do not have access to an instance of the class, so this is + # going to error unless the method is just doing `return + # NotImplemented`. + try: + # Second argument is False, as it is normally a bool. + # The other two are placeholders for objects. + if func(None, False, None) is NotImplemented: + continue + except Exception: + pass + + # Construct the default kwargs dict and docstring + cls.keywords[name] = func._default_value + if func.__doc__: + doc_keywords += ' '*8 + doc_keywords += func.__doc__.strip() + doc_keywords += '\n\n' + + if six.PY2: + cls.run_tests.__func__.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords) + else: + cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords) + + return super(TestRunnerBase, cls).__new__(cls) + + def _generate_args(self, **kwargs): + # Update default values with passed kwargs + # but don't modify the defaults + keywords = copy.deepcopy(self.keywords) + keywords.update(kwargs) + # Iterate through the keywords (in order of priority) + args = [] + for keyword in keywords.keys(): + func = getattr(self, keyword) + result = func(keywords[keyword], keywords) + + # Allow disabaling of options in a subclass + if result is NotImplemented: + raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword)) + + # keyword methods must return a list + if not isinstance(result, list): + raise TypeError("{} keyword method must return a list".format(keyword)) + + args += result + + if six.PY2: + args = [x.encode('utf-8') for x in args] + + return args + + RUN_TESTS_DOCSTRING = \ + """ + Run the tests for the package. + + This method builds arguments for and then calls ``pytest.main``. + + Parameters + ---------- + {keywords} + + """ + + def run_tests(self, **kwargs): + # The docstring for this method is defined as a class variable. 
+ # This allows it to be built for each subclass in __new__. + + # Don't import pytest until it's actually needed to run the tests + import pytest + + # Raise error for undefined kwargs + allowed_kwargs = set(self.keywords.keys()) + passed_kwargs = set(kwargs.keys()) + if not passed_kwargs.issubset(allowed_kwargs): + wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs)) + raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0])) + + args = self._generate_args(**kwargs) + + # override the config locations to not make a new directory nor use + # existing cache or config + astropy_config = tempfile.mkdtemp('astropy_config') + astropy_cache = tempfile.mkdtemp('astropy_cache') + + # Have to use nested with statements for cross-Python support + # Note, using these context managers here is superfluous if the + # config_dir or cache_dir options to py.test are in use, but it's + # also harmless to nest the contexts + with set_temp_config(astropy_config, delete=True): + with set_temp_cache(astropy_cache, delete=True): + return pytest.main(args=args, plugins=self.keywords['plugins']) + + @classmethod + def make_test_runner_in(cls, path): + """ + Constructs a `TestRunner` to run in the given path, and returns a + ``test()`` function which takes the same arguments as + `TestRunner.run_tests`. + + The returned ``test()`` function will be defined in the module this + was called from. This is used to implement the ``astropy.test()`` + function (or the equivalent for affiliated packages). + """ + + runner = cls(path) + + @wraps(runner.run_tests, ('__doc__',), exclude_args=('self',)) + def test(**kwargs): + return runner.run_tests(**kwargs) + + module = find_current_module(2) + if module is not None: + test.__module__ = module.__name__ + + # A somewhat unusual hack, but delete the attached __wrapped__ + # attribute--although this is normally used to tell if the function + # was wrapped with wraps, on some version of Python this is also + # used to determine the signature to display in help() which is + # not useful in this case. We don't really care in this case if the + # function was wrapped either + if hasattr(test, '__wrapped__'): + del test.__wrapped__ + + return test + + +class TestRunner(TestRunnerBase): + """ + A test runner for astropy tests + """ + + # Increase priority so this warning is displayed first. + @keyword(priority=1000) + def coverage(self, coverage, kwargs): + if coverage: + warnings.warn( + "The coverage option is ignored on run_tests, since it " + "can not be made to work in that context. Use " + "'python setup.py test --coverage' instead.", + AstropyWarning) + + return [] + + # test_path depends on self.package_path so make sure this runs before + # test_path. + @keyword(priority=1) + def package(self, package, kwargs): + """ + package : str, optional + The name of a specific package to test, e.g. 'io.fits' or 'utils'. + If nothing is specified all default Astropy tests are run. + """ + if package is None: + self.package_path = self.base_path + else: + self.package_path = os.path.join(self.base_path, + package.replace('.', os.path.sep)) + + if not os.path.isdir(self.package_path): + raise ValueError('Package not found: {0}'.format(package)) + + if not kwargs['test_path']: + return [self.package_path] + + return [] + + @keyword() + def test_path(self, test_path, kwargs): + """ + test_path : str, optional + Specify location to test by path. May be a single file or + directory. 
Must be specified absolutely or relative to the + calling directory. + """ + all_args = [] + # Ensure that the package kwarg has been run. + self.package(kwargs['package'], kwargs) + if test_path: + base, ext = os.path.splitext(test_path) + + if ext in ('.rst', ''): + if kwargs['docs_path'] is None: + # This shouldn't happen from "python setup.py test" + raise ValueError( + "Can not test .rst files without a docs_path " + "specified.") + + abs_docs_path = os.path.abspath(kwargs['docs_path']) + abs_test_path = os.path.abspath( + os.path.join(abs_docs_path, os.pardir, test_path)) + + common = os.path.commonprefix((abs_docs_path, abs_test_path)) + + if os.path.exists(abs_test_path) and common == abs_docs_path: + # Since we aren't testing any Python files within + # the astropy tree, we need to forcibly load the + # astropy py.test plugins, and then turn on the + # doctest_rst plugin. + all_args.extend(['-p', 'astropy.tests.pytest_plugins', + '--doctest-rst']) + test_path = abs_test_path + + if not (os.path.isdir(test_path) or ext in ('.py', '.rst')): + raise ValueError("Test path must be a directory or a path to " + "a .py or .rst file") + + return all_args + [test_path] + + return [] + + @keyword() + def args(self, args, kwargs): + """ + args : str, optional + Additional arguments to be passed to ``pytest.main`` in the ``args`` + keyword argument. + """ + if args: + return shlex.split(args, posix=not sys.platform.startswith('win')) + + return [] + + @keyword() + def plugins(self, plugins, kwargs): + """ + plugins : list, optional + Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword + argument. + """ + return [] + + @keyword() + def verbose(self, verbose, kwargs): + """ + verbose : bool, optional + Convenience option to turn on verbose output from py.test. Passing + True is the same as specifying ``-v`` in ``args``. + """ + if verbose: + return ['-v'] + + return [] + + @keyword() + def pastebin(self, pastebin, kwargs): + """ + pastebin : ('failed', 'all', None), optional + Convenience option for turning on py.test pastebin output. Set to + 'failed' to upload info for failed tests, or 'all' to upload info + for all tests. + """ + if pastebin is not None: + if pastebin in ['failed', 'all']: + return ['--pastebin={0}'.format(pastebin)] + else: + raise ValueError("pastebin should be 'failed' or 'all'") + + return [] + + @keyword(default_value='none') + def remote_data(self, remote_data, kwargs): + """ + remote_data : {'none', 'astropy', 'any'}, optional + Controls whether to run tests marked with @remote_data. This can be + set to run no tests with remote data (``none``), only ones that use + data from http://data.astropy.org (``astropy``), or all tests that + use remote data (``any``). The default is ``none``. + """ + + if remote_data is True: + remote_data = 'any' + elif remote_data is False: + remote_data = 'none' + elif remote_data not in ('none', 'astropy', 'any'): + warnings.warn("The remote_data option should be one of " + "none/astropy/any (found {0}). For backward-compatibility, " + "assuming 'any', but you should change the option to be " + "one of the supported ones to avoid issues in " + "future.".format(remote_data), + AstropyDeprecationWarning) + remote_data = 'any' + + return ['--remote-data={0}'.format(remote_data)] + + @keyword() + def pep8(self, pep8, kwargs): + """ + pep8 : bool, optional + Turn on PEP8 checking via the pytest-pep8 plugin and disable normal + tests. Same as specifying ``--pep8 -k pep8`` in ``args``. 
+ """ + if pep8: + try: + import pytest_pep8 # pylint: disable=W0611 + except ImportError: + raise ImportError('PEP8 checking requires pytest-pep8 plugin: ' + 'http://pypi.python.org/pypi/pytest-pep8') + else: + return ['--pep8', '-k', 'pep8'] + + return [] + + @keyword() + def pdb(self, pdb, kwargs): + """ + pdb : bool, optional + Turn on PDB post-mortem analysis for failing tests. Same as + specifying ``--pdb`` in ``args``. + """ + if pdb: + return ['--pdb'] + return [] + + @keyword() + def open_files(self, open_files, kwargs): + """ + open_files : bool, optional + Fail when any tests leave files open. Off by default, because + this adds extra run time to the test suite. Requires the + ``psutil`` package. + """ + if open_files: + if kwargs['parallel'] != 0: + raise SystemError( + "open file detection may not be used in conjunction with " + "parallel testing.") + + try: + import psutil # pylint: disable=W0611 + except ImportError: + raise SystemError( + "open file detection requested, but psutil package " + "is not installed.") + + return ['--open-files'] + + print("Checking for unclosed files") + + return [] + + @keyword(0) + def parallel(self, parallel, kwargs): + """ + parallel : int or 'auto', optional + When provided, run the tests in parallel on the specified + number of CPUs. If parallel is ``'auto'``, it will use the all + the cores on the machine. Requires the ``pytest-xdist`` plugin. + """ + if parallel != 0: + try: + from xdist import plugin # noqa + except ImportError: + raise SystemError( + "running tests in parallel requires the pytest-xdist package") + + return ['-n', six.text_type(parallel)] + + return [] + + @keyword() + def docs_path(self, docs_path, kwargs): + """ + docs_path : str, optional + The path to the documentation .rst files. + """ + if docs_path is not None and not kwargs['skip_docs']: + if kwargs['package'] is not None: + docs_path = os.path.join( + docs_path, kwargs['package'].replace('.', os.path.sep)) + if not os.path.exists(docs_path): + warnings.warn( + "Can not test .rst docs, since docs path " + "({0}) does not exist.".format(docs_path)) + docs_path = None + if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']: + return [docs_path, '--doctest-rst'] + + return [] + + @keyword() + def skip_docs(self, skip_docs, kwargs): + """ + skip_docs : `bool`, optional + When `True`, skips running the doctests in the .rst files. + """ + # Skip docs is a bool used by docs_path only. + return [] + + @keyword() + def repeat(self, repeat, kwargs): + """ + repeat : `int`, optional + If set, specifies how many times each test should be run. This is + useful for diagnosing sporadic failures. + """ + if repeat: + return ['--repeat={0}'.format(repeat)] + + return [] + + # Override run_tests for astropy-specific fixes + def run_tests(self, **kwargs): + + # This prevents cyclical import problems that make it + # impossible to test packages that define Table types on their + # own. 
+ from ..table import Table # pylint: disable=W0611 + + return super(TestRunner, self).run_tests(**kwargs) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4eb8c6988cba2043b210b1649e7e759bdbe649af Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/runner.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..7081eb89ab8d4a33a8ae2acdc5689b8f4eaedae6 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.py @@ -0,0 +1,11 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def get_package_data(): + return { + 'astropy.tests': ['coveragerc'], + 'astropy.tests.tests': ['data/open_file_detection.txt']} + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbbc18bb56cae15092808bcd86c09cd542f5a8a6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..2b380b826f626ec286f7ba27c63061228ee1514d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.py @@ -0,0 +1,489 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import imp +import sys +import warnings + +import pytest + +from .helper import catch_warnings +from .. import log +from ..logger import LoggingError, conf +from ..utils.exceptions import AstropyWarning, AstropyUserWarning + + +# Save original values of hooks. These are not the system values, but the +# already overwritten values since the logger already gets imported before +# this file gets executed. 
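For orientation, the keyword methods defined on ``TestRunner`` above surface as arguments to the top-level ``astropy.test()`` entry point (wired up via ``make_test_runner_in``). A hedged sketch of a typical invocation; the particular argument values are illustrative:

    import astropy

    # run only the io.fits tests, verbosely, with no remote-data tests,
    # repeating each test twice to chase a sporadic failure
    astropy.test(package='io.fits', verbose=True, remote_data='none', repeat=2)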
+_excepthook = sys.__excepthook__ +_showwarning = warnings.showwarning + + +try: + ip = get_ipython() +except NameError: + ip = None + + +def setup_function(function): + + # Reset modules to default + imp.reload(warnings) + imp.reload(sys) + + # Reset internal original hooks + log._showwarning_orig = None + log._excepthook_orig = None + + # Set up the logger + log._set_defaults() + + # Reset hooks + if log.warnings_logging_enabled(): + log.disable_warnings_logging() + if log.exception_logging_enabled(): + log.disable_exception_logging() + + +teardown_module = setup_function + + +def test_warnings_logging_disable_no_enable(): + with pytest.raises(LoggingError) as e: + log.disable_warnings_logging() + assert e.value.args[0] == 'Warnings logging has not been enabled' + + +def test_warnings_logging_enable_twice(): + log.enable_warnings_logging() + with pytest.raises(LoggingError) as e: + log.enable_warnings_logging() + assert e.value.args[0] == 'Warnings logging has already been enabled' + + +def test_warnings_logging_overridden(): + log.enable_warnings_logging() + warnings.showwarning = lambda: None + with pytest.raises(LoggingError) as e: + log.disable_warnings_logging() + assert e.value.args[0] == 'Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden' + + +def test_warnings_logging(): + + # Without warnings logging + with catch_warnings() as warn_list: + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + assert len(log_list) == 0 + assert len(warn_list) == 1 + assert warn_list[0].message.args[0] == "This is a warning" + + # With warnings logging + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('This is a warning') + assert log_list[0].origin == 'astropy.tests.test_logger' + + # With warnings logging (differentiate between Astropy and non-Astropy) + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + warnings.warn("This is another warning, not from Astropy") + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 1 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('This is a warning') + assert log_list[0].origin == 'astropy.tests.test_logger' + assert warn_list[0].message.args[0] == "This is another warning, not from Astropy" + + # Without warnings logging + with catch_warnings() as warn_list: + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + assert len(log_list) == 0 + assert len(warn_list) == 1 + assert warn_list[0].message.args[0] == "This is a warning" + + +def test_warnings_logging_with_custom_class(): + class CustomAstropyWarningClass(AstropyWarning): + pass + + # With warnings logging + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", CustomAstropyWarningClass) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning') + assert 
log_list[0].origin == 'astropy.tests.test_logger' + + +def test_warning_logging_with_io_votable_warning(): + from ..io.votable.exceptions import W02, vo_warn + + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + vo_warn(W02, ('a', 'b')) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + x = log_list[0].message.startswith(("W02: ?:?:?: W02: a attribute 'b' is " + "invalid. Must be a standard XML id")) + assert x + assert log_list[0].origin == 'astropy.tests.test_logger' + + +def test_import_error_in_warning_logging(): + """ + Regression test for https://github.com/astropy/astropy/issues/2671 + + This test actually puts a goofy fake module into ``sys.modules`` to test + this problem. + """ + + class FakeModule(object): + def __getattr__(self, attr): + raise ImportError('_showwarning should ignore any exceptions ' + 'here') + + log.enable_warnings_logging() + + sys.modules[''] = FakeModule() + try: + warnings.showwarning(AstropyWarning('Regression test for #2671'), + AstropyWarning, '', 1) + finally: + del sys.modules[''] + + +def test_exception_logging_disable_no_enable(): + with pytest.raises(LoggingError) as e: + log.disable_exception_logging() + assert e.value.args[0] == 'Exception logging has not been enabled' + + +def test_exception_logging_enable_twice(): + log.enable_exception_logging() + with pytest.raises(LoggingError) as e: + log.enable_exception_logging() + assert e.value.args[0] == 'Exception logging has already been enabled' + + +# You can't really override the exception handler in IPython this way, so +# this test doesn't really make sense in the IPython context. +@pytest.mark.skipif(str("ip is not None")) +def test_exception_logging_overridden(): + log.enable_exception_logging() + sys.excepthook = lambda etype, evalue, tb: None + with pytest.raises(LoggingError) as e: + log.disable_exception_logging() + assert e.value.args[0] == 'Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden' + + +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging(): + + # Without exception logging + try: + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 0 + + # With exception logging + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith('Exception: This is an Exception') + assert log_list[0].origin == 'astropy.tests.test_logger' + + # Without exception logging + log.disable_exception_logging() + try: + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 0 + + +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging_origin(): + # The point here is to get an exception raised from another location + # and make sure 
the error's origin is reported correctly + + from ..utils.collections import HomogeneousList + + l = HomogeneousList(int) + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + l.append('foo') + except TypeError as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0].startswith( + "homogeneous list must contain only objects of type ") + else: + assert False + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith( + "TypeError: homogeneous list must contain only objects of type ") + assert log_list[0].origin == 'astropy.utils.collections' + + +@pytest.mark.skipif("sys.version_info[:2] >= (3, 5)", + reason="Infinite recursion on Python 3.5") +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging_argless_exception(): + """ + Regression test for a crash that occurred on Python 3 when logging an + exception that was instantiated with no arguments (no message, etc.) + + Regression test for https://github.com/astropy/astropy/pull/4056 + """ + + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + raise Exception() + except Exception as exc: + sys.excepthook(*sys.exc_info()) + else: + assert False # exception should have been raised + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + # Pytest changed the format of its error message sometime between 3.1 and + # 3.3. Using ``startswith`` lets us be general enough to handle all cases. + assert log_list[0].message.startswith('Exception') + assert log_list[0].origin == 'astropy.tests.test_logger' + + +@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR']) +def test_log_to_list(level): + + orig_level = log.level + + try: + if level is not None: + log.setLevel(level) + + with log.log_to_list() as log_list: + log.error("Error message") + log.warning("Warning message") + log.info("Information message") + log.debug("Debug message") + finally: + log.setLevel(orig_level) + + if level is None: + # The log level *should* be set to whatever it was in the config + level = conf.log_level + + # Check list length + if level == 'DEBUG': + assert len(log_list) == 4 + elif level == 'INFO': + assert len(log_list) == 3 + elif level == 'WARN': + assert len(log_list) == 2 + elif level == 'ERROR': + assert len(log_list) == 1 + + # Check list content + + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith('Error message') + assert log_list[0].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 2: + assert log_list[1].levelname == 'WARNING' + assert log_list[1].message.startswith('Warning message') + assert log_list[1].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 3: + assert log_list[2].levelname == 'INFO' + assert log_list[2].message.startswith('Information message') + assert log_list[2].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 4: + assert log_list[3].levelname == 'DEBUG' + assert log_list[3].message.startswith('Debug message') + assert log_list[3].origin == 'astropy.tests.test_logger' + + +def test_log_to_list_level(): + + with log.log_to_list(filter_level='ERROR') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 1 and log_list[0].levelname == 'ERROR' + + +def test_log_to_list_origin1(): + + with log.log_to_list(filter_origin='astropy.tests') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 2 + + +def 
test_log_to_list_origin2(): + + with log.log_to_list(filter_origin='astropy.wcs') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 0 + + +@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR']) +def test_log_to_file(tmpdir, level): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + orig_level = log.level + + try: + if level is not None: + log.setLevel(level) + + with log.log_to_file(log_path): + log.error("Error message") + log.warning("Warning message") + log.info("Information message") + log.debug("Debug message") + + log_file.close() + finally: + log.setLevel(orig_level) + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + if level is None: + # The log level *should* be set to whatever it was in the config + level = conf.log_level + + # Check list length + if level == 'DEBUG': + assert len(log_entries) == 4 + elif level == 'INFO': + assert len(log_entries) == 3 + elif level == 'WARN': + assert len(log_entries) == 2 + elif level == 'ERROR': + assert len(log_entries) == 1 + + # Check list content + + assert eval(log_entries[0].strip())[-3:] == ( + 'astropy.tests.test_logger', 'ERROR', 'Error message') + + if len(log_entries) >= 2: + assert eval(log_entries[1].strip())[-3:] == ( + 'astropy.tests.test_logger', 'WARNING', 'Warning message') + + if len(log_entries) >= 3: + assert eval(log_entries[2].strip())[-3:] == ( + 'astropy.tests.test_logger', 'INFO', 'Information message') + + if len(log_entries) >= 4: + assert eval(log_entries[3].strip())[-3:] == ( + 'astropy.tests.test_logger', 'DEBUG', 'Debug message') + + +def test_log_to_file_level(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_level='ERROR'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 1 + assert eval(log_entries[0].strip())[-2:] == ( + 'ERROR', 'Error message') + + +def test_log_to_file_origin1(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_origin='astropy.tests'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 2 + + +def test_log_to_file_origin2(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_origin='astropy.wcs'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 0 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfc63af673efba606561d681c18cb8665cdbb958 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/test_logger.pyc differ diff --git 
a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..800d82e7ee00f69a89739dd3a1c3c6f5e29be442 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ffa3a4202a8d529022bdbd6ce9c46fac6515dc Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/data/open_file_detection.txt b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/data/open_file_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..2cfb00b7ead3e4bcf4070bd51e22f4df9cf30b0f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/data/open_file_detection.txt @@ -0,0 +1 @@ +CONTENTS diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..55d4cec9000c291ee6707165e21f7ea8b4fd5c14 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.py @@ -0,0 +1,72 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import pkgutil +import os +import types + + +# Compatibility subpackages that should only be used on Python 2 +_py2_packages = set([ + 'astropy.extern.configobj_py2', +]) + +# Same but for Python 3 +_py3_packages = set([ + 'astropy.extern.configobj_py3', +]) + + +def test_imports(): + """ + This just imports all modules in astropy, making sure they don't have any + dependencies that sneak through + """ + + from ...utils import find_current_module + + pkgornm = find_current_module(1).__name__.split('.')[0] + + if isinstance(pkgornm, six.string_types): + package = pkgutil.get_loader(pkgornm).load_module(pkgornm) + elif (isinstance(pkgornm, types.ModuleType) and + '__init__' in pkgornm.__file__): + package = pkgornm + else: + msg = 'test_imports is not determining a valid package/package name' + raise TypeError(msg) + + if hasattr(package, '__path__'): + pkgpath = package.__path__ + elif hasattr(package, '__file__'): + pkgpath = os.path.split(package.__file__)[0] + else: + raise AttributeError('package to generate config items for does not ' + 'have __file__ or __path__') + + if six.PY2: + excludes = _py3_packages + else: + excludes = _py2_packages + + prefix = package.__name__ + '.' 
+ + def onerror(name): + if not any(name.startswith(excl) for excl in excludes): + # A legitimate error occurred in a module that wasn't excluded + raise + + for imper, nm, ispkg in pkgutil.walk_packages(pkgpath, prefix, + onerror=onerror): + imper.find_module(nm) + + +def test_toplevel_namespace(): + import astropy + d = dir(astropy) + assert 'os' not in d + assert 'log' in d + assert 'test' in d + assert 'sys' not in d diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80c7af8325adc4f4bbc699387d3bac88536431d3 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_imports.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2d8c8cb73a0d09c62bfa0c15f3c86cc81411e7 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.py @@ -0,0 +1,17 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + + +from ...utils.data import get_pkg_data_filename + +fd = None + + +def test_open_file_detection(): + global fd + fd = open(get_pkg_data_filename('data/open_file_detection.txt')) + + +def teardown(): + if fd is not None: + fd.close() diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4a9b8505848a54c6a8cf652cb603d19837d6f5e Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_open_file_detection.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..d70087462a4b6b48487f60a12d45775f872c08d9 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.py @@ -0,0 +1,38 @@ +from ... 
import units as u + +from ..helper import assert_quantity_allclose, pytest + + +def test_assert_quantity_allclose(): + + assert_quantity_allclose([1, 2], [1, 2]) + + assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm) + + assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm) + + with pytest.raises(AssertionError): + assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm) + + with pytest.raises(AssertionError): + assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm) + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2] * u.m, [100, 200]) + assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [100, 200] * u.cm) + assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3) + assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m) + assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m) + assert exc.value.args[0] == "`rtol` should be dimensionless" diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adf645346d2dd9910200f9738e29f2e9a4a82962 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_quantity_helpers.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..3104e1db821c46a20558921da76cc5b117d81124 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import doctest + +from textwrap import dedent + +import pytest + +# test helper.run_tests function +from ... import test as run_tests +from ... extern import six + +from .. 
import helper + + +# run_tests should raise ValueError when asked to run on a module it can't find +def test_module_not_found(): + with helper.pytest.raises(ValueError): + run_tests(package='fake.module') + + +# run_tests should raise ValueError when passed an invalid pastebin= option +def test_pastebin_keyword(): + with helper.pytest.raises(ValueError): + run_tests(pastebin='not_an_option') + + +# TODO: Temporarily disabled, as this seems to non-deterministically fail +# def test_deprecation_warning(): +# with pytest.raises(DeprecationWarning): +# warnings.warn('test warning', DeprecationWarning) + + +def test_unicode_literal_conversion(): + assert isinstance('ångström', six.text_type) + + +def test_doctest_float_replacement(tmpdir): + test1 = dedent(""" + This will demonstrate a doctest that fails due to a few extra decimal + places:: + + >>> 1.0 / 3.0 + 0.333333333333333311 + """) + + test2 = dedent(""" + This is the same test, but it should pass with use of + +FLOAT_CMP:: + + >>> 1.0 / 3.0 # doctest: +FLOAT_CMP + 0.333333333333333311 + """) + + test1_rst = tmpdir.join('test1.rst') + test2_rst = tmpdir.join('test2.rst') + test1_rst.write(test1) + test2_rst.write(test2) + + with pytest.raises(doctest.DocTestFailure): + doctest.testfile(str(test1_rst), module_relative=False, + raise_on_error=True, verbose=False, encoding='utf-8') + + doctest.testfile(str(test2_rst), module_relative=False, + raise_on_error=True, verbose=False, encoding='utf-8') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7be438caa782d9ed0de3315ed2b04d260b78d290 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_run_tests.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..952e9ea2fa10c6b8da0278fadccaa777d29de178 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.py @@ -0,0 +1,87 @@ +import pytest + +# Renamed these imports so that them being in the namespace will not +# cause pytest 3 to discover them as tests and then complain that +# they have __init__ defined. 
+from astropy.tests.runner import TestRunner as _TestRunner +from astropy.tests.runner import TestRunnerBase as _TestRunnerBase +from astropy.tests.runner import keyword + + +def test_disable_kwarg(): + class no_remote_data(_TestRunner): + @keyword() + def remote_data(self, remote_data, kwargs): + return NotImplemented + + r = no_remote_data('.') + with pytest.raises(TypeError): + r.run_tests(remote_data='bob') + + +def test_wrong_kwarg(): + r = _TestRunner('.') + with pytest.raises(TypeError): + r.run_tests(spam='eggs') + + +def test_invalid_kwarg(): + class bad_return(_TestRunnerBase): + @keyword() + def remote_data(self, remote_data, kwargs): + return 'bob' + + r = bad_return('.') + with pytest.raises(TypeError): + r.run_tests(remote_data='bob') + + +def test_new_kwarg(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + return [spam] + + r = Spam('.') + + args = r._generate_args(spam='spam') + + assert ['spam'] == args + + +def test_priority(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + return [spam] + + @keyword(priority=1) + def eggs(self, eggs, kwargs): + return [eggs] + + r = Spam('.') + + args = r._generate_args(spam='spam', eggs='eggs') + + assert ['eggs', 'spam'] == args + + +def test_docs(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + """ + Spam Spam Spam + """ + return [spam] + + @keyword() + def eggs(self, eggs, kwargs): + """ + eggs asldjasljd + """ + return [eggs] + + r = Spam('.') + assert "eggs" in r.run_tests.__doc__ + assert "Spam Spam Spam" in r.run_tests.__doc__ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea040b80b73d37026b76bef026b0e62e1afabc93 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_runner.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.py new file mode 100644 index 0000000000000000000000000000000000000000..21d789e9247b78c4e893b305d35d3ca5b2780576 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.py @@ -0,0 +1,49 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# this test doesn't actually use any online data, it should just be skipped +# by run_tests because it has the remote_data decorator. 
+from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest + +from ..helper import remote_data +from ...utils.data import get_pkg_data_filename, download_file + + +@remote_data +def test_skip_remote_data(pytestconfig): + + # astropy.test() has remote_data=none or remote_data=astropy but we still + # got here somehow, so fail with a helpful message + + if pytestconfig.getoption('remote_data') == 'none': + pytest.fail('@remote_data was not skipped with remote_data=none') + elif pytestconfig.getoption('remote_data') == 'astropy': + pytest.fail('@remote_data was not skipped with remote_data=astropy') + + # Test Astropy URL + get_pkg_data_filename('galactic_center/gc_2mass_k.fits') + + # Test non-Astropy URL + download_file('http://www.google.com') + + +@remote_data(source='astropy') +def test_skip_remote_data_astropy(pytestconfig): + + # astropy.test() has remote_data=none but we still got here somehow, + # so fail with a helpful message + + if pytestconfig.getoption('remote_data') == 'none': + pytest.fail('@remote_data was not skipped with remote_data=none') + + # Test Astropy URL + get_pkg_data_filename('galactic_center/gc_2mass_k.fits') + + # Test non-Astropy URL + if pytestconfig.getoption('remote_data') == 'astropy': + with pytest.raises(Exception) as exc: + download_file('http://www.google.com') + assert "An attempt was made to connect to the internet" in str(exc.value) + else: + download_file('http://www.google.com') diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01905a553e2a8c04acfaacc09f4328491988f7f1 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_skip_remote_data.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b1fdd1c96bc1e1625431644b2bf172c086c94d --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.py @@ -0,0 +1,87 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import sys +import time + +from threading import Thread + +import pytest + +from ..disable_internet import no_internet +from ...extern.six.moves import BaseHTTPServer, SimpleHTTPServer +from ...extern.six.moves.urllib.request import urlopen + + +def test_outgoing_fails(): + with pytest.raises(IOError): + with no_internet(): + urlopen('http://www.python.org') + + +class StoppableHTTPServer(BaseHTTPServer.HTTPServer, object): + def __init__(self, *args): + super(StoppableHTTPServer, self).__init__(*args) + self.stop = False + + def handle_request(self): + self.stop = True + super(StoppableHTTPServer, self).handle_request() + + def serve_forever(self): + """ + Serve until stop set, which will happen if any request is handled + """ + while not self.stop: + self.handle_request() + + +@pytest.mark.parametrize(('localhost'), ('localhost', '127.0.0.1')) +def test_localconnect_succeeds(localhost): + """ + Ensure that connections to localhost are allowed, since these are genuinely + not remotedata. 
+ """ + + # port "0" means find open port + # see http://stackoverflow.com/questions/1365265/on-localhost-how-to-pick-a-free-port-number + httpd = StoppableHTTPServer(('localhost', 0), + SimpleHTTPServer.SimpleHTTPRequestHandler) + + port = httpd.socket.getsockname()[1] + + server = Thread(target=httpd.serve_forever) + server.setDaemon(True) + + server.start() + time.sleep(0.1) + + urlopen('http://{localhost:s}:{port:d}'.format(localhost=localhost, port=port)).close() + + +PY3_4 = sys.version_info[:2] >= (3, 4) + + +# Used for the below test--inline functions aren't pickleable +# by multiprocessing? +def _square(x): + return x ** 2 + + +@pytest.mark.skipif('not PY3_4 or sys.platform == "win32" or sys.platform.startswith("gnu0")') +def test_multiprocessing_forkserver(): + """ + Test that using multiprocessing with forkserver works. Perhaps + a simpler more direct test would be to just open some local + sockets and pass something through them. + + Regression test for https://github.com/astropy/astropy/pull/3713 + """ + + import multiprocessing + ctx = multiprocessing.get_context('forkserver') + pool = ctx.Pool(1) + result = pool.map(_square, [1, 2, 3, 4, 5]) + pool.close() + pool.join() + assert result == [1, 4, 9, 16, 25] diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dff85b3a32ff547cf2feaeb28a07a1aa19f0d95 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/tests/tests/test_socketblocker.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..564a408e5a4a83827f7a18ac0f178d8d8e1f2c8b --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.py @@ -0,0 +1,3 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from .formats import * +from .core import * diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a7c568e4165997933696946044091e33a287e97 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b3e0d51d302dd57f79fc95b7f7a0a8d1146697 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.py @@ -0,0 +1,1753 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +The astropy.time package provides functionality for manipulating times and +dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI, +UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in +astronomy. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import copy +import operator +from datetime import datetime +from collections import defaultdict + +import numpy as np + +from .. 
import units as u, constants as const +from .. import _erfa as erfa +from ..units import UnitConversionError +from ..utils.decorators import lazyproperty +from ..utils import ShapedLikeNDArray +from ..utils.compat.misc import override__dir__ +from ..utils.data_info import MixinInfo, data_info_factory +from ..utils.compat.numpy import broadcast_to +from ..extern import six +from ..extern.six.moves import zip +from .utils import day_frac +from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS, + TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime) +# Import TimeFromEpoch to avoid breaking code that followed the old example of +# making a custom timescale in the documentation. +from .formats import TimeFromEpoch # pylint: disable=W0611 + + +__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'TIME_DELTA_SCALES', + 'ScaleValueError', 'OperandTypeError', 'TimeInfo'] + + +TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') +MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'), + ('tai', 'tcg'): ('tt',), + ('tai', 'ut1'): ('utc',), + ('tai', 'tdb'): ('tt',), + ('tcb', 'tcg'): ('tdb', 'tt'), + ('tcb', 'tt'): ('tdb',), + ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'), + ('tcb', 'utc'): ('tdb', 'tt', 'tai'), + ('tcg', 'tdb'): ('tt',), + ('tcg', 'ut1'): ('tt', 'tai', 'utc'), + ('tcg', 'utc'): ('tt', 'tai'), + ('tdb', 'ut1'): ('tt', 'tai', 'utc'), + ('tdb', 'utc'): ('tt', 'tai'), + ('tt', 'ut1'): ('tai', 'utc'), + ('tt', 'utc'): ('tai',), + } +GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg') +BARYCENTRIC_SCALES = ('tcb', 'tdb') +ROTATIONAL_SCALES = ('ut1',) +TIME_DELTA_TYPES = dict((scale, scales) + for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES, + ROTATIONAL_SCALES) for scale in scales) +TIME_DELTA_SCALES = TIME_DELTA_TYPES.keys() +# For time scale changes, we need L_G and L_B, which are stored in erfam.h as +# /* L_G = 1 - d(TT)/d(TCG) */ +# define ERFA_ELG (6.969290134e-10) +# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */ +# define ERFA_ELB (1.550519768e-8) +# These are exposed in erfa as erfa.ELG and erfa.ELB. +# Implied: d(TT)/d(TCG) = 1-L_G +# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G) +# scale offsets as second = first + first * scale_offset[(first,second)] +SCALE_OFFSETS = {('tt', 'tai'): None, + ('tai', 'tt'): None, + ('tcg', 'tt'): -erfa.ELG, + ('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG), + ('tcg', 'tai'): -erfa.ELG, + ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG), + ('tcb', 'tdb'): -erfa.ELB, + ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)} + +# triple-level dictionary, yay! +SIDEREAL_TIME_MODELS = { + 'mean': { + 'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')}, + 'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')}, + 'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}}, + 'apparent': { + 'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')}, + 'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')}, + 'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)}, + 'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}} + + +class TimeInfo(MixinInfo): + """ + Container for meta information like name, description, format. This is + required when the object is used as a mixin column within a table, but can + be used as a general way to store meta information. 
+ """ + attrs_from_parent = set(['unit']) # unit is read-only and None + attr_names = MixinInfo.attr_names | {'serialize_method'} + _supports_indexing = True + + # The usual tuple of attributes needed for serialization is replaced + # by a property, since Time can be serialized different ways. + _represent_as_dict_extra_attrs = ('format', 'scale', 'precision', + 'in_subfmt', 'out_subfmt', 'location', + '_delta_ut1_utc', '_delta_tdb_tt') + + @property + def _represent_as_dict_attrs(self): + method = self.serialize_method[self._serialize_context] + if method == 'formatted_value': + out = ('value',) + elif method == 'jd1_jd2': + out = ('jd1', 'jd2') + else: + raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'") + + return out + self._represent_as_dict_extra_attrs + + def __init__(self, bound=False): + super(MixinInfo, self).__init__(bound) + + # If bound to a data object instance then create the dict of attributes + # which stores the info attribute values. + if bound: + # Specify how to serialize this object depending on context. + # If ``True`` for a context, then use formatted ``value`` attribute + # (e.g. the ISO time string). If ``False`` then use decimal jd1 and jd2. + self.serialize_method = {'fits': 'jd1_jd2', + 'ecsv': 'formatted_value', + 'hdf5': 'jd1_jd2', + 'yaml': 'jd1_jd2', + None: 'jd1_jd2'} + + @property + def unit(self): + return None + + info_summary_stats = staticmethod( + data_info_factory(names=MixinInfo._stats, + funcs=[getattr(np, stat) for stat in MixinInfo._stats])) + # When Time has mean, std, min, max methods: + # funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats]) + + def _construct_from_dict_base(self, map): + if 'jd1' in map and 'jd2' in map: + format = map.pop('format') + map['format'] = 'jd' + map['val'] = map.pop('jd1') + map['val2'] = map.pop('jd2') + else: + format = map['format'] + map['val'] = map.pop('value') + + out = self._parent_cls(**map) + out.format = format + return out + + def _construct_from_dict(self, map): + delta_ut1_utc = map.pop('_delta_ut1_utc', None) + delta_tdb_tt = map.pop('_delta_tdb_tt', None) + + out = self._construct_from_dict_base(map) + + if delta_ut1_utc is not None: + out._delta_ut1_utc = delta_ut1_utc + if delta_tdb_tt is not None: + out._delta_tdb_tt = delta_tdb_tt + + return out + + +class TimeDeltaInfo(TimeInfo): + _represent_as_dict_extra_attrs = ('format', 'scale') + + def _construct_from_dict(self, map): + return self._construct_from_dict_base(map) + + +class Time(ShapedLikeNDArray): + """ + Represent and manipulate times and dates for astronomy. + + A `Time` object is initialized with one or more times in the ``val`` + argument. The input times in ``val`` must conform to the specified + ``format`` and must correspond to the specified time ``scale``. The + optional ``val2`` time input should be supplied only for numeric input + formats (e.g. JD) where very high precision (better than 64-bit precision) + is required. + + The allowed values for ``format`` can be listed with:: + + >>> list(Time.FORMATS) + ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date', + 'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str', + 'jyear_str'] + + Parameters + ---------- + val : sequence, str, number, or `~astropy.time.Time` object + Value(s) to initialize the time or times. + val2 : sequence, str, or number; optional + Value(s) to initialize the time or times. 
+ format : str, optional + Format of input value(s) + scale : str, optional + Time scale of input value(s), must be one of the following: + ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') + precision : int, optional + Digits of precision in string representation of time + in_subfmt : str, optional + Subformat for inputting string times + out_subfmt : str, optional + Subformat for outputting string times + location : `~astropy.coordinates.EarthLocation` or tuple, optional + If given as an tuple, it should be able to initialize an + an EarthLocation instance, i.e., either contain 3 items with units of + length for geocentric coordinates, or contain a longitude, latitude, + and an optional height for geodetic coordinates. + Can be a single location, or one for each input time. + copy : bool, optional + Make a copy of the input values + """ + + SCALES = TIME_SCALES + """List of time scales""" + + FORMATS = TIME_FORMATS + """Dict of time formats""" + + # Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__) + # gets called over the __mul__ of Numpy arrays. + __array_priority__ = 20000 + + # Declare that Time can be used as a Table column by defining the + # attribute where column attributes will be stored. + _astropy_column_attrs = None + + def __new__(cls, val, val2=None, format=None, scale=None, + precision=None, in_subfmt=None, out_subfmt=None, + location=None, copy=False): + + if isinstance(val, cls): + self = val.replicate(format=format, copy=copy) + else: + self = super(Time, cls).__new__(cls) + + return self + + def __getnewargs__(self): + return (self._time,) + + def __init__(self, val, val2=None, format=None, scale=None, + precision=None, in_subfmt=None, out_subfmt=None, + location=None, copy=False): + + if location is not None: + from ..coordinates import EarthLocation + if isinstance(location, EarthLocation): + self.location = location + else: + self.location = EarthLocation(*location) + else: + self.location = None + + if isinstance(val, self.__class__): + # Update _time formatting parameters if explicitly specified + if precision is not None: + self._time.precision = precision + if in_subfmt is not None: + self._time.in_subfmt = in_subfmt + if out_subfmt is not None: + self._time.out_subfmt = out_subfmt + + if scale is not None: + self._set_scale(scale) + else: + self._init_from_vals(val, val2, format, scale, copy, + precision, in_subfmt, out_subfmt) + + if self.location is not None and (self.location.size > 1 and + self.location.shape != self.shape): + try: + # check the location can be broadcast to self's shape. + self.location = broadcast_to(self.location, self.shape, + subok=True) + except Exception: + raise ValueError('The location with shape {0} cannot be ' + 'broadcast against time with shape {1}. ' + 'Typically, either give a single location or ' + 'one for each time.' + .format(self.location.shape, self.shape)) + + def _init_from_vals(self, val, val2, format, scale, copy, + precision=None, in_subfmt=None, out_subfmt=None): + """ + Set the internal _format, scale, and _time attrs from user + inputs. This handles coercion into the correct shapes and + some basic input validation. 
+ """ + if precision is None: + precision = 3 + if in_subfmt is None: + in_subfmt = '*' + if out_subfmt is None: + out_subfmt = '*' + + # Coerce val into an array + val = _make_array(val, copy) + + # If val2 is not None, ensure consistency + if val2 is not None: + val2 = _make_array(val2, copy) + try: + np.broadcast(val, val2) + except ValueError: + raise ValueError('Input val and val2 have inconsistent shape; ' + 'they cannot be broadcast together.') + + if scale is not None: + if not (isinstance(scale, six.string_types) and + scale.lower() in self.SCALES): + raise ScaleValueError("Scale {0!r} is not in the allowed scales " + "{1}".format(scale, + sorted(self.SCALES))) + + # Parse / convert input values into internal jd1, jd2 based on format + self._time = self._get_time_fmt(val, val2, format, scale, + precision, in_subfmt, out_subfmt) + self._format = self._time.name + + def _get_time_fmt(self, val, val2, format, scale, + precision, in_subfmt, out_subfmt): + """ + Given the supplied val, val2, format and scale try to instantiate + the corresponding TimeFormat class to convert the input values into + the internal jd1 and jd2. + + If format is `None` and the input is a string-type or object array then + guess available formats and stop when one matches. + """ + + if format is None and val.dtype.kind in ('S', 'U', 'O'): + formats = [(name, cls) for name, cls in self.FORMATS.items() + if issubclass(cls, TimeUnique)] + err_msg = ('any of the formats where the format keyword is ' + 'optional {0}'.format([name for name, cls in formats])) + # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry, + # but try to guess it at the end. + formats.append(('astropy_time', TimeAstropyTime)) + + elif not (isinstance(format, six.string_types) and + format.lower() in self.FORMATS): + if format is None: + raise ValueError("No time format was given, and the input is " + "not unique") + else: + raise ValueError("Format {0!r} is not one of the allowed " + "formats {1}".format(format, + sorted(self.FORMATS))) + else: + formats = [(format, self.FORMATS[format])] + err_msg = 'the format class {0}'.format(format) + + for format, FormatClass in formats: + try: + return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt) + except UnitConversionError: + raise + except (ValueError, TypeError): + pass + else: + raise ValueError('Input values did not match {0}'.format(err_msg)) + + @classmethod + def now(cls): + """ + Creates a new object corresponding to the instant in time this + method is called. + + .. note:: + "Now" is determined using the `~datetime.datetime.utcnow` + function, so its accuracy and precision is determined by that + function. Generally that means it is set by the accuracy of + your system clock. + + Returns + ------- + nowtime + A new `Time` object (or a subclass of `Time` if this is called from + such a subclass) at the current time. + """ + # call `utcnow` immediately to be sure it's ASAP + dtnow = datetime.utcnow() + return cls(val=dtnow, format='datetime', scale='utc') + + info = TimeInfo() + + @property + def format(self): + """ + Get or set time format. + + The format defines the way times are represented when accessed via the + ``.value`` attribute. By default it is the same as the format used for + initializing the `Time` instance, but it can be set to any other value + that could be used for initialization. 
These can be listed with:: + + >>> list(Time.FORMATS) + ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date', + 'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str', + 'jyear_str'] + """ + return self._format + + @format.setter + def format(self, format): + """Set time format""" + if format not in self.FORMATS: + raise ValueError('format must be one of {0}' + .format(list(self.FORMATS))) + format_cls = self.FORMATS[format] + + # If current output subformat is not in the new format then replace + # with default '*' + if hasattr(format_cls, 'subfmts'): + subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts] + if self.out_subfmt not in subfmt_names: + self.out_subfmt = '*' + + self._time = format_cls(self._time.jd1, self._time.jd2, + self._time._scale, self.precision, + in_subfmt=self.in_subfmt, + out_subfmt=self.out_subfmt, + from_jd=True) + self._format = format + + def __repr__(self): + return ("<{0} object: scale='{1}' format='{2}' value={3}>" + .format(self.__class__.__name__, self.scale, self.format, + getattr(self, self.format))) + + def __str__(self): + return str(getattr(self, self.format)) + + @property + def scale(self): + """Time scale""" + return self._time.scale + + def _set_scale(self, scale): + """ + This is the key routine that actually does time scale conversions. + This is not public and not connected to the read-only scale property. + """ + + if scale == self.scale: + return + if scale not in self.SCALES: + raise ValueError("Scale {0!r} is not in the allowed scales {1}" + .format(scale, sorted(self.SCALES))) + + # Determine the chain of scale transformations to get from the current + # scale to the new scale. MULTI_HOPS contains a dict of all + # transformations (xforms) that require intermediate xforms. + # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order. + xform = (self.scale, scale) + xform_sort = tuple(sorted(xform)) + multi = MULTI_HOPS.get(xform_sort, ()) + xforms = xform_sort[:1] + multi + xform_sort[-1:] + # If we made the reverse xform then reverse it now. + if xform_sort != xform: + xforms = tuple(reversed(xforms)) + + # Transform the jd1,2 pairs through the chain of scale xforms. + jd1, jd2 = self._time.jd1, self._time.jd2 + for sys1, sys2 in zip(xforms[:-1], xforms[1:]): + # Some xforms require an additional delta_ argument that is + # provided through Time methods. These values may be supplied by + # the user or computed based on available approximations. The + # get_delta_ methods are available for only one combination of + # sys1, sys2 though the property applies for both xform directions. + args = [jd1, jd2] + for sys12 in ((sys1, sys2), (sys2, sys1)): + dt_method = '_get_delta_{0}_{1}'.format(*sys12) + try: + get_dt = getattr(self, dt_method) + except AttributeError: + pass + else: + args.append(get_dt(jd1, jd2)) + break + + conv_func = getattr(erfa, sys1 + sys2) + jd1, jd2 = conv_func(*args) + self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision, + self.in_subfmt, self.out_subfmt, + from_jd=True) + + @property + def precision(self): + """ + Decimal precision when outputting seconds as floating point (int + value between 0 and 9 inclusive). 
+ """ + return self._time.precision + + @precision.setter + def precision(self, val): + if not isinstance(val, int) or val < 0 or val > 9: + raise ValueError('precision attribute must be an int between ' + '0 and 9') + self._time.precision = val + del self.cache + + @property + def in_subfmt(self): + """ + Unix wildcard pattern to select subformats for parsing string input + times. + """ + return self._time.in_subfmt + + @in_subfmt.setter + def in_subfmt(self, val): + if not isinstance(val, six.string_types): + raise ValueError('in_subfmt attribute must be a string') + self._time.in_subfmt = val + del self.cache + + @property + def out_subfmt(self): + """ + Unix wildcard pattern to select subformats for outputting times. + """ + return self._time.out_subfmt + + @out_subfmt.setter + def out_subfmt(self, val): + if not isinstance(val, six.string_types): + raise ValueError('out_subfmt attribute must be a string') + self._time.out_subfmt = val + del self.cache + + @property + def shape(self): + """The shape of the time instances. + + Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a + tuple. Note that if different instances share some but not all + underlying data, setting the shape of one instance can make the other + instance unusable. Hence, it is strongly recommended to get new, + reshaped instances with the ``reshape`` method. + + Raises + ------ + AttributeError + If the shape of the ``jd1``, ``jd2``, ``location``, + ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed + without the arrays being copied. For these cases, use the + `Time.reshape` method (which copies any arrays that cannot be + reshaped in-place). + """ + return self._time.jd1.shape + + @shape.setter + def shape(self, shape): + # We have to keep track of arrays that were already reshaped, + # since we may have to return those to their original shape if a later + # shape-setting fails. + reshaped = [] + oldshape = self.shape + for attr in ('jd1', 'jd2', '_delta_ut1_utc', '_delta_tdb_tt', + 'location'): + val = getattr(self, attr, None) + if val is not None and val.size > 1: + try: + val.shape = shape + except AttributeError: + for val2 in reshaped: + val2.shape = oldshape + raise + else: + reshaped.append(val) + + def _shaped_like_input(self, value): + return value if self._time.jd1.shape else value.item() + + @property + def jd1(self): + """ + First of the two doubles that internally store time value(s) in JD. + """ + return self._shaped_like_input(self._time.jd1) + + @property + def jd2(self): + """ + Second of the two doubles that internally store time value(s) in JD. + """ + return self._shaped_like_input(self._time.jd2) + + @property + def value(self): + """Time value(s) in current format""" + # The underlying way to get the time values for the current format is: + # self._shaped_like_input(self._time.to_value(parent=self)) + # This is done in __getattr__. By calling getattr(self, self.format) + # the ``value`` attribute is cached. + return getattr(self, self.format) + + def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None): + """Light travel time correction to the barycentre or heliocentre. + + The frame transformations used to calculate the location of the solar + system barycentre and the heliocentre rely on the erfa routine epv00, + which is consistent with the JPL DE405 ephemeris to an accuracy of + 11.2 km, corresponding to a light travel time of 4 microseconds. 
+ + The routine assumes the source(s) are at large distance, i.e., neglects + finite-distance effects. + + Parameters + ---------- + skycoord : `~astropy.coordinates.SkyCoord` + The sky location to calculate the correction for. + kind : str, optional + ``'barycentric'`` (default) or ``'heliocentric'`` + location : `~astropy.coordinates.EarthLocation`, optional + The location of the observatory to calculate the correction for. + If no location is given, the ``location`` attribute of the Time + object is used + ephemeris : str, optional + Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default, + use the one set with ``astropy.coordinates.solar_system_ephemeris.set``. + For more information, see `~astropy.coordinates.solar_system_ephemeris`. + + Returns + ------- + time_offset : `~astropy.time.TimeDelta` + The time offset between the barycentre or Heliocentre and Earth, + in TDB seconds. Should be added to the original time to get the + time in the Solar system barycentre or the Heliocentre. + Also, the time conversion to BJD will then include the relativistic correction as well. + """ + + if kind.lower() not in ('barycentric', 'heliocentric'): + raise ValueError("'kind' parameter must be one of 'heliocentric' " + "or 'barycentric'") + + if location is None: + if self.location is None: + raise ValueError('An EarthLocation needs to be set or passed ' + 'in to calculate bary- or heliocentric ' + 'corrections') + location = self.location + + from ..coordinates import (UnitSphericalRepresentation, CartesianRepresentation, + HCRS, ICRS, GCRS, solar_system_ephemeris) + + # ensure sky location is ICRS compatible + if not skycoord.is_transformable_to(ICRS()): + raise ValueError("Given skycoord is not transformable to the ICRS") + + # get location of observatory in ITRS coordinates at this Time + try: + itrs = location.get_itrs(obstime=self) + except Exception: + raise ValueError("Supplied location does not have a valid `get_itrs` method") + + with solar_system_ephemeris.set(ephemeris): + if kind.lower() == 'heliocentric': + # convert to heliocentric coordinates, aligned with ICRS + cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz + else: + # first we need to convert to GCRS coordinates with the correct + # obstime, since ICRS coordinates have no frame time + gcrs_coo = itrs.transform_to(GCRS(obstime=self)) + # convert to barycentric (BCRS) coordinates, aligned with ICRS + cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz + + # get unit ICRS vector to star + spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation). + represent_as(CartesianRepresentation).xyz) + + # Move X,Y,Z to last dimension, to enable possible broadcasting below. + cpos = np.rollaxis(cpos, 0, cpos.ndim) + spos = np.rollaxis(spos, 0, spos.ndim) + + # calculate light travel time correction + tcor_val = (spos * cpos).sum(axis=-1) / const.c + return TimeDelta(tcor_val, scale='tdb') + + def sidereal_time(self, kind, longitude=None, model=None): + """Calculate sidereal time. + + Parameters + --------------- + kind : str + ``'mean'`` or ``'apparent'``, i.e., accounting for precession + only, or also for nutation. + longitude : `~astropy.units.Quantity`, `str`, or `None`; optional + The longitude on the Earth at which to compute the sidereal time. + Can be given as a `~astropy.units.Quantity` with angular units + (or an `~astropy.coordinates.Angle` or + `~astropy.coordinates.Longitude`), or as a name of an + observatory (currently, only ``'greenwich'`` is supported, + equivalent to 0 deg). 
If `None` (default), the ``lon`` attribute of + the Time object is used. + model : str or `None`; optional + Precession (and nutation) model to use. The available ones are: + - {0}: {1} + - {2}: {3} + If `None` (default), the last (most recent) one from the appropriate + list above is used. + + Returns + ------- + sidereal time : `~astropy.coordinates.Longitude` + Sidereal time as a quantity with units of hourangle + """ # docstring is formatted below + + from ..coordinates import Longitude + + if kind.lower() not in SIDEREAL_TIME_MODELS.keys(): + raise ValueError('The kind of sidereal time has to be {0}'.format( + ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys())))) + + available_models = SIDEREAL_TIME_MODELS[kind.lower()] + + if model is None: + model = sorted(available_models.keys())[-1] + else: + if model.upper() not in available_models: + raise ValueError( + 'Model {0} not implemented for {1} sidereal time; ' + 'available models are {2}' + .format(model, kind, sorted(available_models.keys()))) + + if longitude is None: + if self.location is None: + raise ValueError('No longitude is given but the location for ' + 'the Time object is not set.') + longitude = self.location.lon + elif longitude == 'greenwich': + longitude = Longitude(0., u.degree, + wrap_angle=180.*u.degree) + else: + # sanity check on input + longitude = Longitude(longitude, u.degree, + wrap_angle=180.*u.degree) + + gst = self._erfa_sidereal_time(available_models[model.upper()]) + return Longitude(gst + longitude, u.hourangle) + + if isinstance(sidereal_time.__doc__, six.string_types): + sidereal_time.__doc__ = sidereal_time.__doc__.format( + 'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()), + 'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys())) + + def _erfa_sidereal_time(self, model): + """Calculate a sidereal time using a IAU precession/nutation model.""" + + from ..coordinates import Longitude + + erfa_function = model['function'] + erfa_parameters = [getattr(getattr(self, scale)._time, jd_part) + for scale in model['scales'] + for jd_part in ('jd1', 'jd2')] + + sidereal_time = erfa_function(*erfa_parameters) + + return Longitude(sidereal_time, u.radian).to(u.hourangle) + + def copy(self, format=None): + """ + Return a fully independent copy the Time object, optionally changing + the format. + + If ``format`` is supplied then the time format of the returned Time + object will be set accordingly, otherwise it will be unchanged from the + original. + + In this method a full copy of the internal time arrays will be made. + The internal time arrays are normally not changeable by the user so in + most cases the ``replicate()`` method should be used. + + Parameters + ---------- + format : str, optional + Time format of the copy. + + Returns + ------- + tm : Time object + Copy of this object + """ + return self._apply('copy', format=format) + + def replicate(self, format=None, copy=False): + """ + Return a replica of the Time object, optionally changing the format. + + If ``format`` is supplied then the time format of the returned Time + object will be set accordingly, otherwise it will be unchanged from the + original. + + If ``copy`` is set to `True` then a full copy of the internal time arrays + will be made. By default the replica will use a reference to the + original arrays when possible to save memory. The internal time arrays + are normally not changeable by the user so in most cases it should not + be necessary to set ``copy`` to `True`. 
+ + The convenience method copy() is available in which ``copy`` is `True` + by default. + + Parameters + ---------- + format : str, optional + Time format of the replica. + copy : bool, optional + Return a true copy instead of using references where possible. + + Returns + ------- + tm : Time object + Replica of this object + """ + return self._apply('copy' if copy else 'replicate', format=format) + + def _apply(self, method, *args, **kwargs): + """Create a new time object, possibly applying a method to the arrays. + + Parameters + ---------- + method : str or callable + If string, can be 'replicate' or the name of a relevant + `~numpy.ndarray` method. In the former case, a new time instance + with unchanged internal data is created, while in the latter the + method is applied to the internal ``jd1`` and ``jd2`` arrays, as + well as to possible ``location``, ``_delta_ut1_utc``, and + ``_delta_tdb_tt`` arrays. + If a callable, it is directly applied to the above arrays. + Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. If the ``format`` keyword + argument is present, this will be used as the Time format of the + replica. + + Examples + -------- + Some ways this is used internally:: + + copy : ``_apply('copy')`` + replicate : ``_apply('replicate')`` + reshape : ``_apply('reshape', new_shape)`` + index or slice : ``_apply('__getitem__', item)`` + broadcast : ``_apply(np.broadcast, shape=new_shape)`` + """ + new_format = kwargs.pop('format', None) + if new_format is None: + new_format = self.format + + if callable(method): + apply_method = lambda array: method(array, *args, **kwargs) + else: + if method == 'replicate': + apply_method = None + else: + apply_method = operator.methodcaller(method, *args, **kwargs) + + jd1, jd2 = self._time.jd1, self._time.jd2 + if apply_method: + jd1 = apply_method(jd1) + jd2 = apply_method(jd2) + + tm = super(Time, self.__class__).__new__(self.__class__) + tm._time = TimeJD(jd1, jd2, self.scale, self.precision, + self.in_subfmt, self.out_subfmt, from_jd=True) + # Optional ndarray attributes. + for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location', + 'precision', 'in_subfmt', 'out_subfmt'): + try: + val = getattr(self, attr) + except AttributeError: + continue + + if apply_method: + # Apply the method to any value arrays (though skip if there is + # only a single element and the method would return a view, + # since in that case nothing would change). + if getattr(val, 'size', 1) > 1: + val = apply_method(val) + elif method == 'copy' or method == 'flatten': + # flatten should copy also for a single element array, but + # we cannot use it directly for array scalars, since it + # always returns a one-dimensional array. So, just copy. + val = copy.copy(val) + + setattr(tm, attr, val) + + # Copy other 'info' attr only if it has actually been defined. + # See PR #3898 for further explanation and justification, along + # with Quantity.__array_finalize__ + if 'info' in self.__dict__: + tm.info = self.info + + # Make the new internal _time object corresponding to the format + # in the copy. If the format is unchanged this process is lightweight + # and does not create any new arrays. 
+ if new_format not in tm.FORMATS: + raise ValueError('format must be one of {0}' + .format(list(tm.FORMATS))) + + NewFormat = tm.FORMATS[new_format] + tm._time = NewFormat(tm._time.jd1, tm._time.jd2, + tm._time._scale, tm.precision, + tm.in_subfmt, tm.out_subfmt, + from_jd=True) + tm._format = new_format + + return tm + + def __copy__(self): + """ + Overrides the default behavior of the `copy.copy` function in + the python stdlib to behave like `Time.copy`. Does *not* make a + copy of the JD arrays - only copies by reference. + """ + return self.replicate() + + def __deepcopy__(self, memo): + """ + Overrides the default behavior of the `copy.deepcopy` function + in the python stdlib to behave like `Time.copy`. Does make a + copy of the JD arrays. + """ + return self.copy() + + def _advanced_index(self, indices, axis=None, keepdims=False): + """Turn argmin, argmax output into an advanced index. + + Argmin, argmax output contains indices along a given axis in an array + shaped like the other dimensions. To use this to get values at the + correct location, a list is constructed in which the other axes are + indexed sequentially. For ``keepdims`` is ``True``, the net result is + the same as constructing an index grid with ``np.ogrid`` and then + replacing the ``axis`` item with ``indices`` with its shaped expanded + at ``axis``. For ``keepdims`` is ``False``, the result is the same but + with the ``axis`` dimension removed from all list entries. + + For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`. + + Parameters + ---------- + indices : array + Output of argmin or argmax. + axis : int or None + axis along which argmin or argmax was used. + keepdims : bool + Whether to construct indices that keep or remove the axis along + which argmin or argmax was used. Default: ``False``. + + Returns + ------- + advanced_index : list of arrays + Suitable for use as an advanced index. + """ + if axis is None: + return np.unravel_index(indices, self.shape) + + ndim = self.ndim + if axis < 0: + axis = axis + ndim + + if keepdims and indices.ndim < self.ndim: + indices = np.expand_dims(indices, axis) + return [(indices if i == axis else np.arange(s).reshape( + (1,)*(i if keepdims or i < axis else i-1) + (s,) + + (1,)*(ndim-i-(1 if keepdims or i > axis else 2)))) + for i, s in enumerate(self.shape)] + + def argmin(self, axis=None, out=None): + """Return indices of the minimum values along the given axis. + + This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. See :func:`~numpy.argmin` for detailed documentation. + """ + # first get the minimum at normal precision. + jd = self.jd1 + self.jd2 + approx = jd.min(axis, keepdims=True) + + # Approx is very close to the true minimum, and by subtracting it at + # full precision, all numbers near 0 can be represented correctly, + # so we can be sure we get the true minimum. + # The below is effectively what would be done for + # dt = (self - self.__class__(approx, format='jd')).jd + # which translates to: + # approx_jd1, approx_jd2 = day_frac(approx, 0.) + # dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2) + dt = (self.jd1 - approx) + self.jd2 + return dt.argmin(axis, out) + + def argmax(self, axis=None, out=None): + """Return indices of the maximum values along the given axis. + + This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. 
See :func:`~numpy.argmax` for detailed documentation. + """ + # For procedure, see comment on argmin. + jd = self.jd1 + self.jd2 + approx = jd.max(axis, keepdims=True) + + dt = (self.jd1 - approx) + self.jd2 + return dt.argmax(axis, out) + + def argsort(self, axis=-1): + """Returns the indices that would sort the time array. + + This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. Internally, + it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen. + """ + jd_approx = self.jd + jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd + if axis is None: + return np.lexsort((jd_remainder.ravel(), jd_approx.ravel())) + else: + return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis) + + def min(self, axis=None, out=None, keepdims=False): + """Minimum along a given axis. + + This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. + + Note that the ``out`` argument is present only for compatibility with + ``np.min``; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return self[self._advanced_index(self.argmin(axis), axis, keepdims)] + + def max(self, axis=None, out=None, keepdims=False): + """Maximum along a given axis. + + This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. + + Note that the ``out`` argument is present only for compatibility with + ``np.max``; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return self[self._advanced_index(self.argmax(axis), axis, keepdims)] + + def ptp(self, axis=None, out=None, keepdims=False): + """Peak to peak (maximum - minimum) along a given axis. + + This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. + + Note that the ``out`` argument is present only for compatibility with + `~numpy.ptp`; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return (self.max(axis, keepdims=keepdims) - + self.min(axis, keepdims=keepdims)) + + def sort(self, axis=-1): + """Return a copy sorted along the specified axis. + + This is similar to :meth:`~numpy.ndarray.sort`, but internally uses + indexing with :func:`~numpy.lexsort` to ensure that the full precision + given by the two doubles ``jd1`` and ``jd2`` is kept, and that + corresponding attributes are properly sorted and copied as well. + + Parameters + ---------- + axis : int or None + Axis to be sorted. If ``None``, the flattened array is sorted. + By default, sort over the last axis. 
+ """ + return self[self._advanced_index(self.argsort(axis), axis, + keepdims=True)] + + @lazyproperty + def cache(self): + """ + Return the cache associated with this instance. + """ + return defaultdict(dict) + + def __getattr__(self, attr): + """ + Get dynamic attributes to output format or do timescale conversion. + """ + if attr in self.SCALES and self.scale is not None: + cache = self.cache['scale'] + if attr not in cache: + if attr == self.scale: + tm = self + else: + tm = self.replicate() + tm._set_scale(attr) + cache[attr] = tm + return cache[attr] + + elif attr in self.FORMATS: + cache = self.cache['format'] + if attr not in cache: + if attr == self.format: + tm = self + else: + tm = self.replicate(format=attr) + value = tm._shaped_like_input(tm._time.to_value(parent=tm)) + cache[attr] = value + return cache[attr] + + elif attr in TIME_SCALES: # allowed ones done above (self.SCALES) + if self.scale is None: + raise ScaleValueError("Cannot convert TimeDelta with " + "undefined scale to any defined scale.") + else: + raise ScaleValueError("Cannot convert {0} with scale " + "'{1}' to scale '{2}'" + .format(self.__class__.__name__, + self.scale, attr)) + + else: + # Should raise AttributeError + return self.__getattribute__(attr) + + @override__dir__ + def __dir__(self): + result = set(self.SCALES) + result.update(self.FORMATS) + return result + + def _match_shape(self, val): + """ + Ensure that `val` is matched to length of self. If val has length 1 + then broadcast, otherwise cast to double and make sure shape matches. + """ + val = _make_array(val, copy=True) # be conservative and copy + if val.size > 1 and val.shape != self.shape: + try: + # check the value can be broadcast to the shape of self. + val = broadcast_to(val, self.shape, subok=True) + except Exception: + raise ValueError('Attribute shape must match or be ' + 'broadcastable to that of Time object. ' + 'Typically, give either a single value or ' + 'one for each time.') + + return val + + def get_delta_ut1_utc(self, iers_table=None, return_status=False): + """Find UT1 - UTC differences by interpolating in IERS Table. + + Parameters + ---------- + iers_table : ``astropy.utils.iers.IERS`` table, optional + Table containing UT1-UTC differences from IERS Bulletins A + and/or B. If `None`, use default version (see + ``astropy.utils.iers``) + return_status : bool + Whether to return status values. If `False` (default), iers + raises `IndexError` if any time is out of the range + covered by the IERS table. + + Returns + ------- + ut1_utc : float or float array + UT1-UTC, interpolated in IERS Table + status : int or int array + Status values (if ``return_status=`True```):: + ``astropy.utils.iers.FROM_IERS_B`` + ``astropy.utils.iers.FROM_IERS_A`` + ``astropy.utils.iers.FROM_IERS_A_PREDICTION`` + ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE`` + ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE`` + + Notes + ----- + In normal usage, UT1-UTC differences are calculated automatically + on the first instance ut1 is needed. + + Examples + -------- + To check in code whether any times are before the IERS table range:: + + >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE + >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc') + >>> delta, status = t.get_delta_ut1_utc(return_status=True) + >>> status == TIME_BEFORE_IERS_RANGE + array([ True, False]...) 
+ """ + if iers_table is None: + from ..utils.iers import IERS + iers_table = IERS.open() + + return iers_table.ut1_utc(self.utc, return_status=return_status) + + # Property for ERFA DUT arg = UT1 - UTC + def _get_delta_ut1_utc(self, jd1=None, jd2=None): + """ + Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and + jd2 args because it gets called that way when converting time scales. + If delta_ut1_utc is not yet set, this will interpolate them from the + the IERS table. + """ + # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in + # seconds. It is obtained from tables published by the IERS. + if not hasattr(self, '_delta_ut1_utc'): + from ..utils.iers import IERS_Auto + iers_table = IERS_Auto.open() + # jd1, jd2 are normally set (see above), except if delta_ut1_utc + # is access directly; ensure we behave as expected for that case + if jd1 is None: + self_utc = self.utc + jd1, jd2 = self_utc.jd1, self_utc.jd2 + scale = 'utc' + else: + scale = self.scale + # interpolate UT1-UTC in IERS table + delta = iers_table.ut1_utc(jd1, jd2) + # if we interpolated using UT1 jds, we may be off by one + # second near leap seconds (and very slightly off elsewhere) + if scale == 'ut1': + # calculate UTC using the offset we got; the ERFA routine + # is tolerant of leap seconds, so will do this right + jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta) + # calculate a better estimate using the nearly correct UTC + delta = iers_table.ut1_utc(jd1_utc, jd2_utc) + + self._set_delta_ut1_utc(delta) + + return self._delta_ut1_utc + + def _set_delta_ut1_utc(self, val): + if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. + val = val.to(u.second).value + val = self._match_shape(val) + self._delta_ut1_utc = val + del self.cache + + # Note can't use @property because _get_delta_tdb_tt is explicitly + # called with the optional jd1 and jd2 args. + delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc) + """UT1 - UTC time scale offset""" + + # Property for ERFA DTR arg = TDB - TT + def _get_delta_tdb_tt(self, jd1=None, jd2=None): + if not hasattr(self, '_delta_tdb_tt'): + # If jd1 and jd2 are not provided (which is the case for property + # attribute access) then require that the time scale is TT or TDB. + # Otherwise the computations here are not correct. + if jd1 is None or jd2 is None: + if self.scale not in ('tt', 'tdb'): + raise ValueError('Accessing the delta_tdb_tt attribute ' + 'is only possible for TT or TDB time ' + 'scales') + else: + jd1 = self._time.jd1 + jd2 = self._time.jd2 + + # First go from the current input time (which is either + # TDB or TT) to an approximate UT1. Since TT and TDB are + # pretty close (few msec?), assume TT. Similarly, since the + # UT1 terms are very small, use UTC instead of UT1. + njd1, njd2 = erfa.tttai(jd1, jd2) + njd1, njd2 = erfa.taiutc(njd1, njd2) + # subtract 0.5, so UT is fraction of the day from midnight + ut = day_frac(njd1 - 0.5, njd2)[1] + + if self.location is None: + from ..coordinates import EarthLocation + location = EarthLocation.from_geodetic(0., 0., 0.) + else: + location = self.location + # Geodetic params needed for d_tdb_tt() + lon = location.lon + rxy = np.hypot(location.x, location.y) + z = location.z + self._delta_tdb_tt = erfa.dtdb( + jd1, jd2, ut, lon.to_value(u.radian), + rxy.to_value(u.km), z.to_value(u.km)) + + return self._delta_tdb_tt + + def _set_delta_tdb_tt(self, val): + if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. 
+ val = val.to(u.second).value + val = self._match_shape(val) + self._delta_tdb_tt = val + del self.cache + + # Note can't use @property because _get_delta_tdb_tt is explicitly + # called with the optional jd1 and jd2 args. + delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt) + """TDB - TT time scale offset""" + + def __sub__(self, other): + if not isinstance(other, Time): + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '-') + + # Tdelta - something is dealt with in TimeDelta, so we have + # T - Tdelta = T + # T - T = Tdelta + other_is_delta = isinstance(other, TimeDelta) + + # we need a constant scale to calculate, which is guaranteed for + # TimeDelta, but not for Time (which can be UTC) + if other_is_delta: # T - Tdelta + out = self.replicate() + if self.scale in other.SCALES: + if other.scale not in (out.scale, None): + other = getattr(other, out.scale) + else: + out._set_scale(other.scale if other.scale is not None + else 'tai') + # remove attributes that are invalidated by changing time + for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): + if hasattr(out, attr): + delattr(out, attr) + + else: # T - T + self_time = (self._time if self.scale in TIME_DELTA_SCALES + else self.tai._time) + # set up TimeDelta, subtraction to be done shortly + out = TimeDelta(self_time.jd1, self_time.jd2, format='jd', + scale=self_time.scale) + + if other.scale != out.scale: + other = getattr(other, out.scale) + + jd1 = out._time.jd1 - other._time.jd1 + jd2 = out._time.jd2 - other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + if other_is_delta: + # Go back to left-side scale if needed + out._set_scale(self.scale) + + return out + + def __add__(self, other): + if not isinstance(other, Time): + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '+') + + # Tdelta + something is dealt with in TimeDelta, so we have + # T + Tdelta = T + # T + T = error + + if not isinstance(other, TimeDelta): + raise OperandTypeError(self, other, '+') + + # ideally, we calculate in the scale of the Time item, since that is + # what we want the output in, but this may not be possible, since + # TimeDelta cannot be converted arbitrarily + out = self.replicate() + if self.scale in other.SCALES: + if other.scale not in (out.scale, None): + other = getattr(other, out.scale) + else: + out._set_scale(other.scale if other.scale is not None else 'tai') + + # remove attributes that are invalidated by changing time + for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): + if hasattr(out, attr): + delattr(out, attr) + + jd1 = out._time.jd1 + other._time.jd1 + jd2 = out._time.jd2 + other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + # Go back to left-side scale if needed + out._set_scale(self.scale) + + return out + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + out = self.__sub__(other) + return -out + + def _time_difference(self, other, op=None): + """If other is of same class as self, return difference in self.scale. + Otherwise, raise OperandTypeError. 
+ """ + if other.__class__ is not self.__class__: + try: + other = self.__class__(other, scale=self.scale) + except Exception: + raise OperandTypeError(self, other, op) + + if(self.scale is not None and self.scale not in other.SCALES or + other.scale is not None and other.scale not in self.SCALES): + raise TypeError("Cannot compare TimeDelta instances with scales " + "'{0}' and '{1}'".format(self.scale, other.scale)) + + if self.scale is not None and other.scale is not None: + other = getattr(other, self.scale) + + return (self.jd1 - other.jd1) + (self.jd2 - other.jd2) + + def __lt__(self, other): + return self._time_difference(other, '<') < 0. + + def __le__(self, other): + return self._time_difference(other, '<=') <= 0. + + def __eq__(self, other): + """ + If other is an incompatible object for comparison, return `False`. + Otherwise, return `True` if the time difference between self and + other is zero. + """ + try: + diff = self._time_difference(other) + except OperandTypeError: + return False + return diff == 0. + + def __ne__(self, other): + """ + If other is an incompatible object for comparison, return `True`. + Otherwise, return `False` if the time difference between self and + other is zero. + """ + try: + diff = self._time_difference(other) + except OperandTypeError: + return True + return diff != 0. + + def __gt__(self, other): + return self._time_difference(other, '>') > 0. + + def __ge__(self, other): + return self._time_difference(other, '>=') >= 0. + + def to_datetime(self, timezone=None): + tm = self.replicate(format='datetime') + return tm._shaped_like_input(tm._time.to_value(timezone)) + + to_datetime.__doc__ = TimeDatetime.to_value.__doc__ + + +class TimeDelta(Time): + """ + Represent the time difference between two times. + + A TimeDelta object is initialized with one or more times in the ``val`` + argument. The input times in ``val`` must conform to the specified + ``format``. The optional ``val2`` time input should be supplied only for + numeric input formats (e.g. JD) where very high precision (better than + 64-bit precision) is required. + + The allowed values for ``format`` can be listed with:: + + >>> list(TimeDelta.FORMATS) + ['sec', 'jd'] + + Note that for time differences, the scale can be among three groups: + geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational + ('ut1'). Within each of these, the scales for time differences are the + same. Conversion between geocentric and barycentric is possible, as there + is only a scale factor change, but one cannot convert to or from 'ut1', as + this requires knowledge of the actual times, not just their difference. For + a similar reason, 'utc' is not a valid scale for a time difference: a UTC + day is not always 86400 seconds. + + Parameters + ---------- + val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object + Value(s) to initialize the time difference(s). Any quantities will + be converted appropriately (with care taken to avoid rounding + errors for regular time units). + val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional + Additional values, as needed to preserve precision. + format : str, optional + Format of input value(s) + scale : str, optional + Time scale of input value(s), must be one of the following values: + ('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or + ``None``), the scale is arbitrary; when added or subtracted from a + ``Time`` instance, it will be used without conversion. 
+ copy : bool, optional + Make a copy of the input values + """ + SCALES = TIME_DELTA_SCALES + """List of time delta scales.""" + + FORMATS = TIME_DELTA_FORMATS + """Dict of time delta formats.""" + + info = TimeDeltaInfo() + + def __init__(self, val, val2=None, format=None, scale=None, copy=False): + if isinstance(val, TimeDelta): + if scale is not None: + self._set_scale(scale) + else: + if format is None: + format = 'jd' + + self._init_from_vals(val, val2, format, scale, copy) + + if scale is not None: + self.SCALES = TIME_DELTA_TYPES[scale] + + def replicate(self, *args, **kwargs): + out = super(TimeDelta, self).replicate(*args, **kwargs) + out.SCALES = self.SCALES + return out + + def _set_scale(self, scale): + """ + This is the key routine that actually does time scale conversions. + This is not public and not connected to the read-only scale property. + """ + + if scale == self.scale: + return + if scale not in self.SCALES: + raise ValueError("Scale {0!r} is not in the allowed scales {1}" + .format(scale, sorted(self.SCALES))) + + # For TimeDelta, there can only be a change in scale factor, + # which is written as time2 - time1 = scale_offset * time1 + scale_offset = SCALE_OFFSETS[(self.scale, scale)] + if scale_offset is None: + self._time.scale = scale + else: + jd1, jd2 = self._time.jd1, self._time.jd2 + offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset) + self._time = self.FORMATS[self.format]( + jd1 + offset1, jd2 + offset2, scale, + self.precision, self.in_subfmt, + self.out_subfmt, from_jd=True) + + def __add__(self, other): + # only deal with TimeDelta + TimeDelta + if isinstance(other, Time): + if not isinstance(other, TimeDelta): + return other.__add__(self) + else: + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '+') + + # the scales should be compatible (e.g., cannot convert TDB to TAI) + if(self.scale is not None and self.scale not in other.SCALES or + other.scale is not None and other.scale not in self.SCALES): + raise TypeError("Cannot add TimeDelta instances with scales " + "'{0}' and '{1}'".format(self.scale, other.scale)) + + # adjust the scale of other if the scale of self is set (or no scales) + if self.scale is not None or other.scale is None: + out = self.replicate() + if other.scale is not None: + other = getattr(other, self.scale) + else: + out = other.replicate() + + jd1 = self._time.jd1 + other._time.jd1 + jd2 = self._time.jd2 + other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + return out + + def __sub__(self, other): + # only deal with TimeDelta - TimeDelta + if isinstance(other, Time): + if not isinstance(other, TimeDelta): + raise OperandTypeError(self, other, '-') + else: + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '-') + + # the scales should be compatible (e.g., cannot convert TDB to TAI) + if(self.scale is not None and self.scale not in other.SCALES or + other.scale is not None and other.scale not in self.SCALES): + raise TypeError("Cannot subtract TimeDelta instances with scales " + "'{0}' and '{1}'".format(self.scale, other.scale)) + + # adjust the scale of other if the scale of self is set (or no scales) + if self.scale is not None or other.scale is None: + out = self.replicate() + if other.scale is not None: + other = getattr(other, self.scale) + else: + out = other.replicate() + + jd1 = self._time.jd1 - other._time.jd1 + jd2 = self._time.jd2 - other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + return 
out + + def __neg__(self): + """Negation of a `TimeDelta` object.""" + new = self.copy() + new._time.jd1 = -self._time.jd1 + new._time.jd2 = -self._time.jd2 + return new + + def __abs__(self): + """Absolute value of a `TimeDelta` object.""" + jd1, jd2 = self._time.jd1, self._time.jd2 + negative = jd1 + jd2 < 0 + new = self.copy() + new._time.jd1 = np.where(negative, -jd1, jd1) + new._time.jd2 = np.where(negative, -jd2, jd2) + return new + + def __mul__(self, other): + """Multiplication of `TimeDelta` objects by numbers/arrays.""" + # check needed since otherwise the self.jd1 * other multiplication + # would enter here again (via __rmul__) + if isinstance(other, Time): + raise OperandTypeError(self, other, '*') + + try: # convert to straight float if dimensionless quantity + other = other.to(1) + except Exception: + pass + + try: + jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other) + out = TimeDelta(jd1, jd2, format='jd', scale=self.scale) + except Exception as err: # try downgrading self to a quantity + try: + return self.to(u.day) * other + except Exception: + raise err + + if self.format != 'jd': + out = out.replicate(format=self.format) + return out + + def __rmul__(self, other): + """Multiplication of numbers/arrays with `TimeDelta` objects.""" + return self.__mul__(other) + + def __div__(self, other): + """Division of `TimeDelta` objects by numbers/arrays.""" + return self.__truediv__(other) + + def __rdiv__(self, other): + """Division by `TimeDelta` objects of numbers/arrays.""" + return self.__rtruediv__(other) + + def __truediv__(self, other): + """Division of `TimeDelta` objects by numbers/arrays.""" + # cannot do __mul__(1./other) as that looses precision + try: + other = other.to(1) + except Exception: + pass + + try: # convert to straight float if dimensionless quantity + jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other) + out = TimeDelta(jd1, jd2, format='jd', scale=self.scale) + except Exception as err: # try downgrading self to a quantity + try: + return self.to(u.day) / other + except Exception: + raise err + + if self.format != 'jd': + out = out.replicate(format=self.format) + return out + + def __rtruediv__(self, other): + """Division by `TimeDelta` objects of numbers/arrays.""" + return other / self.to(u.day) + + def to(self, *args, **kwargs): + return u.Quantity(self._time.jd1 + self._time.jd2, + u.day).to(*args, **kwargs) + + +class ScaleValueError(Exception): + pass + + +def _make_array(val, copy=False): + """ + Take ``val`` and convert/reshape to an array. If ``copy`` is `True` + then copy input values. + + Returns + ------- + val : ndarray + Array version of ``val``. + """ + val = np.array(val, copy=copy, subok=True) + + # Allow only float64, string or object arrays as input + # (object is for datetime, maybe add more specific test later?) + # This also ensures the right byteorder for float64 (closes #2942). 
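+    # Illustrative note (not part of the upstream astropy source): under these
+    # rules, _make_array([1, 2]) comes back as a float64 array (integer input
+    # is recast below), while _make_array(['2000-01-01']) keeps its unicode
+    # dtype so the string can later be parsed by the TimeString formats.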
+ if not (val.dtype == np.float64 or val.dtype.kind in 'OSUa'): + val = np.asanyarray(val, dtype=np.float64) + + return val + + +class OperandTypeError(TypeError): + def __init__(self, left, right, op=None): + op_string = '' if op is None else ' for {0}'.format(op) + super(OperandTypeError, self).__init__( + "Unsupported operand type(s){0}: " + "'{1}' and '{2}'".format(op_string, + left.__class__.__name__, + right.__class__.__name__)) diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95d55d08e4ea514c666124f4c01d7041e86723e5 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/core.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.py new file mode 100644 index 0000000000000000000000000000000000000000..5e02fe3a7728039bdec7a650d23dff7aa975e0bf --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.py @@ -0,0 +1,1170 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import fnmatch +import time +import re +import datetime +from collections import OrderedDict + +import numpy as np + +from .. import units as u +from .. import _erfa as erfa +from ..extern import six +from ..extern.six.moves import zip +from .utils import day_frac, quantity_day_frac, two_sum, two_product + + +__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix', + 'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear', + 'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString', + 'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime', + 'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch', + 'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD', + 'TimeEpochDateString', 'TimeBesselianEpochString', + 'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS', + 'TimezoneInfo'] + +__doctest_skip__ = ['TimePlotDate'] + +# These both get filled in at end after TimeFormat subclasses defined. +# Use an OrderedDict to fix the order in which formats are tried. +# This ensures, e.g., that 'isot' gets tried before 'fits'. +TIME_FORMATS = OrderedDict() +TIME_DELTA_FORMATS = OrderedDict() + +# Translations between deprecated FITS timescales defined by +# Rots et al. 2015, A&A 574:A36, and timescales used here. +FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt', + 'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'} + + +def _regexify_subfmts(subfmts): + """ + Iterate through each of the sub-formats and try substituting simple + regular expressions for the strptime codes for year, month, day-of-month, + hour, minute, second. If no % characters remain then turn the final string + into a compiled regex. This assumes time formats do not have a % in them. + + This is done both to speed up parsing of strings and to allow mixed formats + where strptime does not quite work well enough. 
+    """
+    new_subfmts = []
+    for subfmt_tuple in subfmts:
+        subfmt_in = subfmt_tuple[1]
+        for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
+                                     ('%m', r'(?P<mon>\d{1,2})'),
+                                     ('%d', r'(?P<mday>\d{1,2})'),
+                                     ('%H', r'(?P<hour>\d{1,2})'),
+                                     ('%M', r'(?P<min>\d{1,2})'),
+                                     ('%S', r'(?P<sec>\d{1,2})')):
+            subfmt_in = subfmt_in.replace(strptime_code, regex)
+
+        if '%' not in subfmt_in:
+            subfmt_tuple = (subfmt_tuple[0],
+                            re.compile(subfmt_in + '$'),
+                            subfmt_tuple[2])
+        new_subfmts.append(subfmt_tuple)
+
+    return tuple(new_subfmts)
+
+
+class TimeFormatMeta(type):
+    """
+    Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
+    `TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
+    """
+
+    _registry = TIME_FORMATS
+
+    def __new__(mcls, name, bases, members):
+        cls = super(TimeFormatMeta, mcls).__new__(mcls, name, bases, members)
+
+        # Register time formats that have a name, but leave out astropy_time since
+        # it is not a user-accessible format and is only used for initialization into
+        # a different format.
+        if 'name' in members and cls.name != 'astropy_time':
+            mcls._registry[cls.name] = cls
+
+        if 'subfmts' in members:
+            cls.subfmts = _regexify_subfmts(members['subfmts'])
+
+        return cls
+
+
+@six.add_metaclass(TimeFormatMeta)
+class TimeFormat(object):
+    """
+    Base class for time representations.
+
+    Parameters
+    ----------
+    val1 : numpy ndarray, list, str, or number
+        Data to initialize table.
+    val2 : numpy ndarray, list, str, or number; optional
+        Data to initialize table.
+    scale : str
+        Time scale of input value(s)
+    precision : int
+        Precision for seconds as floating point
+    in_subfmt : str
+        Select subformat for inputting string times
+    out_subfmt : str
+        Select subformat for outputting string times
+    from_jd : bool
+        If true then val1, val2 are jd1, jd2
+    """
+
+    def __init__(self, val1, val2, scale, precision,
+                 in_subfmt, out_subfmt, from_jd=False):
+        self.scale = scale  # validation of scale done later with _check_scale
+        self.precision = precision
+        self.in_subfmt = in_subfmt
+        self.out_subfmt = out_subfmt
+
+        if from_jd:
+            self.jd1 = val1
+            self.jd2 = val2
+        else:
+            val1, val2 = self._check_val_type(val1, val2)
+            self.set_jds(val1, val2)
+
+    def __len__(self):
+        return len(self.jd1)
+
+    @property
+    def scale(self):
+        """Time scale"""
+        self._scale = self._check_scale(self._scale)
+        return self._scale
+
+    @scale.setter
+    def scale(self, val):
+        self._scale = val
+
+    def _check_val_type(self, val1, val2):
+        """Input value validation, typically overridden by derived classes"""
+        if not (val1.dtype == np.double and np.all(np.isfinite(val1)) and
+                (val2 is None or
+                 val2.dtype == np.double and np.all(np.isfinite(val2)))):
+            raise TypeError('Input values for {0} class must be finite doubles'
+                            .format(self.name))
+
+        if getattr(val1, 'unit', None) is not None:
+            # Convert any quantity-likes to days first, attempting to be
+            # careful with the conversion, so that, e.g., large numbers of
+            # seconds get converted without losing precision because
+            # 1/86400 is not exactly representable as a float.
+            val1 = u.Quantity(val1, copy=False)
+            if val2 is not None:
+                val2 = u.Quantity(val2, copy=False)
+
+            try:
+                val1, val2 = quantity_day_frac(val1, val2)
+            except u.UnitsError:
+                raise u.UnitConversionError(
+                    "only quantities with time units can be "
+                    "used to instantiate Time instances.")
+            # We now have days, but the format may expect another unit.
+            # On purpose, multiply with 1./day_unit because typically it is
+            # 1./erfa.DAYSEC, and inverting it recovers the integer.
+ # (This conversion will get undone in format's set_jds, hence + # there may be room for optimizing this.) + factor = 1. / getattr(self, 'unit', 1.) + if factor != 1.: + val1, carry = two_product(val1, factor) + carry += val2 * factor + val1, val2 = two_sum(val1, carry) + + elif getattr(val2, 'unit', None) is not None: + raise TypeError('Cannot mix float and Quantity inputs') + + if val2 is None: + val2 = np.zeros_like(val1) + + def asarray_or_scalar(val): + """ + Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray + or a Python or numpy scalar. + """ + return np.asarray(val) if isinstance(val, np.ndarray) else val + + return asarray_or_scalar(val1), asarray_or_scalar(val2) + + def _check_scale(self, scale): + """ + Return a validated scale value. + + If there is a class attribute 'scale' then that defines the default / + required time scale for this format. In this case if a scale value was + provided that needs to match the class default, otherwise return + the class default. + + Otherwise just make sure that scale is in the allowed list of + scales. Provide a different error message if `None` (no value) was + supplied. + """ + if hasattr(self.__class__, 'epoch_scale') and scale is None: + scale = self.__class__.epoch_scale + + if scale is None: + scale = 'utc' # Default scale as of astropy 0.4 + + if scale not in TIME_SCALES: + raise ScaleValueError("Scale value '{0}' not in " + "allowed values {1}" + .format(scale, TIME_SCALES)) + + return scale + + def set_jds(self, val1, val2): + """ + Set internal jd1 and jd2 from val1 and val2. Must be provided + by derived classes. + """ + raise NotImplementedError + + def to_value(self, parent=None): + """ + Return time representation from internal jd1 and jd2. This is + the base method that ignores ``parent`` and requires that + subclasses implement the ``value`` property. Subclasses that + require ``parent`` or have other optional args for ``to_value`` + should compute and return the value directly. + """ + return self.value + + @property + def value(self): + raise NotImplementedError + + +class TimeJD(TimeFormat): + """ + Julian Date time format. + This represents the number of days since the beginning of + the Julian Period. + For example, 2451544.5 in JD is midnight on January 1, 2000. + """ + name = 'jd' + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # Validate scale. + self.jd1, self.jd2 = day_frac(val1, val2) + + @property + def value(self): + return self.jd1 + self.jd2 + + +class TimeMJD(TimeFormat): + """ + Modified Julian Date time format. + This represents the number of days since midnight on November 17, 1858. + For example, 51544.0 in MJD is midnight on January 1, 2000. + """ + name = 'mjd' + + def set_jds(self, val1, val2): + # TODO - this routine and vals should be Cythonized to follow the ERFA + # convention of preserving precision by adding to the larger of the two + # values in a vectorized operation. But in most practical cases the + # first one is probably biggest. + self._check_scale(self._scale) # Validate scale. + jd1, jd2 = day_frac(val1, val2) + jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + @property + def value(self): + return (self.jd1 - erfa.DJM0) + self.jd2 + + +class TimeDecimalYear(TimeFormat): + """ + Time as a decimal year, with integer values corresponding to midnight + of the first day of each year. For example 2000.5 corresponds to the + ISO time '2000-07-02 00:00:00'. 
+ """ + name = 'decimalyear' + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # Validate scale. + + sum12, err12 = two_sum(val1, val2) + iy_start = np.trunc(sum12).astype(np.int) + extra, y_frac = two_sum(sum12, -iy_start) + y_frac += extra + err12 + + val = (val1 + val2).astype(np.double) + iy_start = np.trunc(val).astype(np.int) + + imon = np.ones_like(iy_start) + iday = np.ones_like(iy_start) + ihr = np.zeros_like(iy_start) + imin = np.zeros_like(iy_start) + isec = np.zeros_like(y_frac) + + # Possible enhancement: use np.unique to only compute start, stop + # for unique values of iy_start. + scale = self.scale.upper().encode('ascii') + jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, + ihr, imin, isec) + jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, + ihr, imin, isec) + + t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd') + t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd') + t_frac = t_start + (t_end - t_start) * y_frac + + self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2) + + @property + def value(self): + scale = self.scale.upper().encode('ascii') + iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0 + self.jd1, self.jd2) + imon = np.ones_like(iy_start) + iday = np.ones_like(iy_start) + ihr = np.zeros_like(iy_start) + imin = np.zeros_like(iy_start) + isec = np.zeros_like(self.jd1) + + # Possible enhancement: use np.unique to only compute start, stop + # for unique values of iy_start. + scale = self.scale.upper().encode('ascii') + jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday, + ihr, imin, isec) + jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday, + ihr, imin, isec) + + dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start) + dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start) + decimalyear = iy_start + dt / dt_end + + return decimalyear + + +class TimeFromEpoch(TimeFormat): + """ + Base class for times that represent the interval from a particular + epoch as a floating point multiple of a unit time interval (e.g. seconds + or days). + """ + + def __init__(self, val1, val2, scale, precision, + in_subfmt, out_subfmt, from_jd=False): + self.scale = scale + # Initialize the reference epoch (a single time defined in subclasses) + epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale, + format=self.epoch_format) + self.epoch = epoch + + # Now create the TimeFormat object as normal + super(TimeFromEpoch, self).__init__(val1, val2, scale, precision, + in_subfmt, out_subfmt, from_jd) + + def set_jds(self, val1, val2): + """ + Initialize the internal jd1 and jd2 attributes given val1 and val2. + For an TimeFromEpoch subclass like TimeUnix these will be floats giving + the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00). + """ + # Form new JDs based on epoch time + time from epoch (converted to JD). + # One subtlety that might not be obvious is that 1.000 Julian days in + # UTC can be 86400 or 86401 seconds. For the TimeUnix format the + # assumption is that every day is exactly 86400 seconds, so this is, in + # principle, doing the math incorrectly, *except* that it matches the + # definition of Unix time which does not include leap seconds. + + # note: use divisor=1./self.unit, since this is either 1 or 1/86400, + # and 1/86400 is not exactly representable as a float64, so multiplying + # by that will cause rounding errors. (But inverting it as a float64 + # recovers the exact number) + day, frac = day_frac(val1, val2, divisor=1. 
/ self.unit) + + jd1 = self.epoch.jd1 + day + jd2 = self.epoch.jd2 + frac + + # Create a temporary Time object corresponding to the new (jd1, jd2) in + # the epoch scale (e.g. UTC for TimeUnix) then convert that to the + # desired time scale for this object. + # + # A known limitation is that the transform from self.epoch_scale to + # self.scale cannot involve any metadata like lat or lon. + try: + tm = getattr(Time(jd1, jd2, scale=self.epoch_scale, + format='jd'), self.scale) + except Exception as err: + raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}'" + "to specified scale '{2}', got error:\n{3}" + .format(self.name, self.epoch_scale, + self.scale, err)) + + self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2) + + def to_value(self, parent=None): + # Make sure that scale is the same as epoch scale so we can just + # subtract the epoch and convert + if self.scale != self.epoch_scale: + if parent is None: + raise ValueError('cannot compute value without parent Time object') + tm = getattr(parent, self.epoch_scale) + jd1, jd2 = tm._time.jd1, tm._time.jd2 + else: + jd1, jd2 = self.jd1, self.jd2 + + time_from_epoch = ((jd1 - self.epoch.jd1) + + (jd2 - self.epoch.jd2)) / self.unit + return time_from_epoch + + value = property(to_value) + + +class TimeUnix(TimeFromEpoch): + """ + Unix time: seconds from 1970-01-01 00:00:00 UTC. + For example, 946684800.0 in Unix time is midnight on January 1, 2000. + + NOTE: this quantity is not exactly unix time and differs from the strict + POSIX definition by up to 1 second on days with a leap second. POSIX + unix time actually jumps backward by 1 second at midnight on leap second + days while this class value is monotonically increasing at 86400 seconds + per UTC day. + """ + name = 'unix' + unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) + epoch_val = '1970-01-01 00:00:00' + epoch_val2 = None + epoch_scale = 'utc' + epoch_format = 'iso' + + +class TimeCxcSec(TimeFromEpoch): + """ + Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT. + For example, 63072064.184 is midnight on January 1, 2000. + """ + name = 'cxcsec' + unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) + epoch_val = '1998-01-01 00:00:00' + epoch_val2 = None + epoch_scale = 'tt' + epoch_format = 'iso' + + +class TimeGPS(TimeFromEpoch): + """GPS time: seconds from 1980-01-06 00:00:00 UTC + For example, 630720013.0 is midnight on January 1, 2000. + + Notes + ===== + This implementation is strictly a representation of the number of seconds + (including leap seconds) since midnight UTC on 1980-01-06. GPS can also be + considered as a time scale which is ahead of TAI by a fixed offset + (to within about 100 nanoseconds). 
+ + For details, see http://tycho.usno.navy.mil/gpstt.html + """ + name = 'gps' + unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds) + epoch_val = '1980-01-06 00:00:19' + # above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai + epoch_val2 = None + epoch_scale = 'tai' + epoch_format = 'iso' + + +class TimePlotDate(TimeFromEpoch): + """ + Matplotlib `~matplotlib.pyplot.plot_date` input: + 1 + number of days from 0001-01-01 00:00:00 UTC + + This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date` + function:: + + >>> import matplotlib.pyplot as plt + >>> jyear = np.linspace(2000, 2001, 20) + >>> t = Time(jyear, format='jyear', scale='utc') + >>> plt.plot_date(t.plot_date, jyear) + >>> plt.gcf().autofmt_xdate() # orient date labels at a slant + >>> plt.draw() + + For example, 730120.0003703703 is midnight on January 1, 2000. + """ + # This corresponds to the zero reference time for matplotlib plot_date(). + # Note that TAI and UTC are equivalent at the reference time. + name = 'plot_date' + unit = 1.0 + epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1 + epoch_val2 = None + epoch_scale = 'utc' + epoch_format = 'jd' + + +class TimeUnique(TimeFormat): + """ + Base class for time formats that can uniquely create a time object + without requiring an explicit format specifier. This class does + nothing but provide inheritance to identify a class as unique. + """ + + +class TimeAstropyTime(TimeUnique): + """ + Instantiate date from an Astropy Time object (or list thereof). + + This is purely for instantiating from a Time object. The output + format is the same as the first time instance. + """ + name = 'astropy_time' + + def __new__(cls, val1, val2, scale, precision, + in_subfmt, out_subfmt, from_jd=False): + """ + Use __new__ instead of __init__ to output a class instance that + is the same as the class of the first Time object in the list. + """ + val1_0 = val1.flat[0] + if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0) + for val in val1.flat)): + raise TypeError('Input values for {0} class must all be same ' + 'astropy Time type.'.format(cls.name)) + + if scale is None: + scale = val1_0.scale + if val1.shape: + vals = [getattr(val, scale)._time for val in val1] + jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals]) + jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals]) + else: + val = getattr(val1_0, scale)._time + jd1, jd2 = val.jd1, val.jd2 + + OutTimeFormat = val1_0._time.__class__ + self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt, + from_jd=True) + + return self + + +class TimeDatetime(TimeUnique): + """ + Represent date as Python standard library `~datetime.datetime` object + + Example:: + + >>> from astropy.time import Time + >>> from datetime import datetime + >>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc') + >>> t.iso + '2000-01-02 12:00:00.000' + >>> t.tt.datetime + datetime.datetime(2000, 1, 2, 12, 1, 4, 184000) + """ + name = 'datetime' + + def _check_val_type(self, val1, val2): + # Note: don't care about val2 for this class + if not all(isinstance(val, datetime.datetime) for val in val1.flat): + raise TypeError('Input values for {0} class must be ' + 'datetime objects'.format(self.name)) + return val1, None + + def set_jds(self, val1, val2): + """Convert datetime object contained in val1 to jd1, jd2""" + # Iterate through the datetime objects, getting year, month, etc. 
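+        # Illustrative note (not part of the upstream astropy source): for an
+        # input of datetime(2000, 1, 2, 12, 0, 0) the loop below fills the
+        # output operands with iy=2000, im=1, id=2, ihr=12, imin=0, dsec=0.0
+        # before they are passed to erfa.dtf2d.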
+ iterator = np.nditer([val1, None, None, None, None, None, None], + flags=['refs_ok'], + op_dtypes=[np.object] + 5*[np.intc] + [np.double]) + for val, iy, im, id, ihr, imin, dsec in iterator: + dt = val.item() + + if dt.tzinfo is not None: + dt = (dt - dt.utcoffset()).replace(tzinfo=None) + + iy[...] = dt.year + im[...] = dt.month + id[...] = dt.day + ihr[...] = dt.hour + imin[...] = dt.minute + dsec[...] = dt.second + dt.microsecond / 1e6 + + jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), + *iterator.operands[1:]) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + def to_value(self, timezone=None, parent=None): + """ + Convert to (potentially timezone-aware) `~datetime.datetime` object. + + If ``timezone`` is not ``None``, return a timezone-aware datetime + object. + + Parameters + ---------- + timezone : {`~datetime.tzinfo`, None} (optional) + If not `None`, return timezone-aware datetime. + + Returns + ------- + `~datetime.datetime` + If ``timezone`` is not ``None``, output will be timezone-aware. + """ + if timezone is not None: + if self._scale != 'utc': + raise ScaleValueError("scale is {}, must be 'utc' when timezone " + "is supplied.".format(self._scale)) + + # Rather than define a value property directly, we have a function, + # since we want to be able to pass in timezone information. + scale = self.scale.upper().encode('ascii') + iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec + self.jd1, self.jd2) + ihrs = ihmsfs[..., 0] + imins = ihmsfs[..., 1] + isecs = ihmsfs[..., 2] + ifracs = ihmsfs[..., 3] + iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None], + flags=['refs_ok'], + op_dtypes=7*[iys.dtype] + [np.object]) + + for iy, im, id, ihr, imin, isec, ifracsec, out in iterator: + if isec >= 60: + raise ValueError('Time {} is within a leap second but datetime ' + 'does not support leap seconds' + .format((iy, im, id, ihr, imin, isec, ifracsec))) + if timezone is not None: + out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec, + tzinfo=TimezoneInfo()).astimezone(timezone) + else: + out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec) + return iterator.operands[-1] + + value = property(to_value) + + +class TimezoneInfo(datetime.tzinfo): + """ + Subclass of the `~datetime.tzinfo` object, used in the + to_datetime method to specify timezones. + + It may be safer in most cases to use a timezone database package like + pytz rather than defining your own timezones - this class is mainly + a workaround for users without pytz. + """ + @u.quantity_input(utc_offset=u.day, dst=u.day) + def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None): + """ + Parameters + ---------- + utc_offset : `~astropy.units.Quantity` (optional) + Offset from UTC in days. Defaults to zero. + dst : `~astropy.units.Quantity` (optional) + Daylight Savings Time offset in days. Defaults to zero + (no daylight savings). 
+ tzname : string, `None` (optional) + Name of timezone + + Examples + -------- + >>> from datetime import datetime + >>> from astropy.time import TimezoneInfo # Specifies a timezone + >>> import astropy.units as u + >>> utc = TimezoneInfo() # Defaults to UTC + >>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1 + >>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour) + >>> print(dt_aware) + 2000-01-01 00:00:00+01:00 + >>> print(dt_aware.astimezone(utc)) + 1999-12-31 23:00:00+00:00 + """ + if utc_offset == 0 and dst == 0 and tzname is None: + tzname = 'UTC' + self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day)) + self._tzname = tzname + self._dst = datetime.timedelta(dst.to_value(u.day)) + + def utcoffset(self, dt): + return self._utcoffset + + def tzname(self, dt): + return str(self._tzname) + + def dst(self, dt): + return self._dst + + +class TimeString(TimeUnique): + """ + Base class for string-like time representations. + + This class assumes that anything following the last decimal point to the + right is a fraction of a second. + + This is a reference implementation can be made much faster with effort. + """ + + def _check_val_type(self, val1, val2): + # Note: don't care about val2 for these classes + if val1.dtype.kind not in ('S', 'U'): + raise TypeError('Input values for {0} class must be strings' + .format(self.name)) + return val1, None + + def parse_string(self, timestr, subfmts): + """Read time from a single string, using a set of possible formats.""" + # Datetime components required for conversion to JD by ERFA, along + # with the default values. + components = ('year', 'mon', 'mday', 'hour', 'min', 'sec') + defaults = (None, 1, 1, 0, 0, 0) + # Assume that anything following "." on the right side is a + # floating fraction of a second. + try: + idot = timestr.rindex('.') + except Exception: + fracsec = 0.0 + else: + timestr, fracsec = timestr[:idot], timestr[idot:] + fracsec = float(fracsec) + + for _, strptime_fmt_or_regex, _ in subfmts: + if isinstance(strptime_fmt_or_regex, six.string_types): + try: + tm = time.strptime(timestr, strptime_fmt_or_regex) + except ValueError: + continue + else: + vals = [getattr(tm, 'tm_' + component) + for component in components] + + else: + tm = re.match(strptime_fmt_or_regex, timestr) + if tm is None: + continue + tm = tm.groupdict() + vals = [int(tm.get(component, default)) for component, default + in zip(components, defaults)] + + # Add fractional seconds + vals[-1] = vals[-1] + fracsec + return vals + else: + raise ValueError('Time {0} does not match {1} format' + .format(timestr, self.name)) + + def set_jds(self, val1, val2): + """Parse the time strings contained in val1 and set jd1, jd2""" + # Select subformats based on current self.in_subfmt + subfmts = self._select_subfmts(self.in_subfmt) + + iterator = np.nditer([val1, None, None, None, None, None, None], + op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double]) + + for val, iy, im, id, ihr, imin, dsec in iterator: + iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = ( + self.parse_string(val.item(), subfmts)) + + jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'), + *iterator.operands[1:]) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + def str_kwargs(self): + """ + Generator that yields a dict of values corresponding to the + calendar date and time for the internal JD values. 
+ """ + scale = self.scale.upper().encode('ascii'), + iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision, + self.jd1, self.jd2) + + # Get the str_fmt element of the first allowed output subformat + _, _, str_fmt = self._select_subfmts(self.out_subfmt)[0] + + if '{yday:' in str_fmt: + has_yday = True + else: + has_yday = False + yday = None + + ihrs = ihmsfs[..., 0] + imins = ihmsfs[..., 1] + isecs = ihmsfs[..., 2] + ifracs = ihmsfs[..., 3] + for iy, im, id, ihr, imin, isec, ifracsec in np.nditer( + [iys, ims, ids, ihrs, imins, isecs, ifracs]): + if has_yday: + yday = datetime.datetime(iy, im, id).timetuple().tm_yday + + yield {'year': int(iy), 'mon': int(im), 'day': int(id), + 'hour': int(ihr), 'min': int(imin), 'sec': int(isec), + 'fracsec': int(ifracsec), 'yday': yday} + + def format_string(self, str_fmt, **kwargs): + """Write time to a string using a given format. + + By default, just interprets str_fmt as a format string, + but subclasses can add to this. + """ + return str_fmt.format(**kwargs) + + @property + def value(self): + # Select the first available subformat based on current + # self.out_subfmt + subfmts = self._select_subfmts(self.out_subfmt) + _, _, str_fmt = subfmts[0] + + # TODO: fix this ugly hack + if self.precision > 0 and str_fmt.endswith('{sec:02d}'): + str_fmt += '.{fracsec:0' + str(self.precision) + 'd}' + + # Try to optimize this later. Can't pre-allocate because length of + # output could change, e.g. year rolls from 999 to 1000. + outs = [] + for kwargs in self.str_kwargs(): + outs.append(str(self.format_string(str_fmt, **kwargs))) + + return np.array(outs).reshape(self.jd1.shape) + + def _select_subfmts(self, pattern): + """ + Return a list of subformats where name matches ``pattern`` using + fnmatch. + """ + + fnmatchcase = fnmatch.fnmatchcase + subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)] + if len(subfmts) == 0: + raise ValueError('No subformats match {0}'.format(pattern)) + return subfmts + + +class TimeISO(TimeString): + """ + ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...". + For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000. + + The allowed subformats are: + + - 'date_hms': date + hours, mins, secs (and optional fractional secs) + - 'date_hm': date + hours, mins + - 'date': date + """ + + name = 'iso' + subfmts = (('date_hms', + '%Y-%m-%d %H:%M:%S', + # XXX To Do - use strftime for output ?? + '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'), + ('date_hm', + '%Y-%m-%d %H:%M', + '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'), + ('date', + '%Y-%m-%d', + '{year:d}-{mon:02d}-{day:02d}')) + + def parse_string(self, timestr, subfmts): + # Handle trailing 'Z' for UTC time + if timestr.endswith('Z'): + if self.scale != 'utc': + raise ValueError("Time input terminating in 'Z' must have " + "scale='UTC'") + timestr = timestr[:-1] + return super(TimeISO, self).parse_string(timestr, subfmts) + + +class TimeISOT(TimeISO): + """ + ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...". + This is the same as TimeISO except for a "T" instead of space between + the date and time. + For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000. 
+
+    The allowed subformats are:
+
+    - 'date_hms': date + hours, mins, secs (and optional fractional secs)
+    - 'date_hm': date + hours, mins
+    - 'date': date
+    """
+
+    name = 'isot'
+    subfmts = (('date_hms',
+                '%Y-%m-%dT%H:%M:%S',
+                '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
+               ('date_hm',
+                '%Y-%m-%dT%H:%M',
+                '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
+               ('date',
+                '%Y-%m-%d',
+                '{year:d}-{mon:02d}-{day:02d}'))
+
+
+class TimeYearDayTime(TimeISO):
+    """
+    Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
+    The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
+    For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
+
+    The allowed subformats are:
+
+    - 'date_hms': date + hours, mins, secs (and optional fractional secs)
+    - 'date_hm': date + hours, mins
+    - 'date': date
+    """
+
+    name = 'yday'
+    subfmts = (('date_hms',
+                '%Y:%j:%H:%M:%S',
+                '{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
+               ('date_hm',
+                '%Y:%j:%H:%M',
+                '{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
+               ('date',
+                '%Y:%j',
+                '{year:d}:{yday:03d}'))
+
+
+class TimeFITS(TimeString):
+    """
+    FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]][(SCALE[(REALIZATION)])]".
+
+    ISOT with two extensions:
+    - Can give signed five-digit year (mostly for negative years);
+    - A possible time scale (and realization) appended in parentheses.
+
+    Note: FITS supports some deprecated names for timescales; these are
+    translated to the formal names upon initialization. Furthermore, any
+    specific realization information is stored only as long as the time scale
+    is not changed.
+
+    The allowed subformats are:
+
+    - 'date_hms': date + hours, mins, secs (and optional fractional secs)
+    - 'date': date
+    - 'longdate_hms': as 'date_hms', but with signed 5-digit year
+    - 'longdate': as 'date', but with signed 5-digit year
+
+    See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
+    """
+    name = 'fits'
+    subfmts = (
+        ('date_hms',
+         (r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
+          r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
+         '{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
+        ('date',
+         r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
+         '{year:04d}-{mon:02d}-{day:02d}'),
+        ('longdate_hms',
+         (r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
+          r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
+         '{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
+        ('longdate',
+         r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
+         '{year:+06d}-{mon:02d}-{day:02d}'))
+    # Add the regex that parses the scale and possible realization.
+    subfmts = tuple(
+        (subfmt[0],
+         subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
+         subfmt[2]) for subfmt in subfmts)
+    _fits_scale = None
+    _fits_realization = None
+
+    def parse_string(self, timestr, subfmts):
+        """Read time and set scale according to trailing scale codes."""
+        # Try parsing with any of the allowed sub-formats.
+        for _, regex, _ in subfmts:
+            tm = re.match(regex, timestr)
+            if tm:
+                break
+        else:
+            raise ValueError('Time {0} does not match {1} format'
+                             .format(timestr, self.name))
+        tm = tm.groupdict()
+        if tm['scale'] is not None:
+            # If a scale was given, translate from a possible deprecated
+            # timescale identifier to the scale used by Time.
+            fits_scale = tm['scale'].upper()
+            scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
+            if scale not in TIME_SCALES:
+                raise ValueError("Scale {0!r} is not in the allowed scales {1}"
+                                 .format(scale, sorted(TIME_SCALES)))
+            # If no scale was given in the initialiser, set the scale to
+            # that given in the string.
Also store a possible realization, + # so we can round-trip (as long as no scale changes are made). + fits_realization = (tm['realization'].upper() + if tm['realization'] else None) + if self._fits_scale is None: + self._fits_scale = fits_scale + self._fits_realization = fits_realization + if self._scale is None: + self._scale = scale + if (scale != self.scale or fits_scale != self._fits_scale or + fits_realization != self._fits_realization): + raise ValueError("Input strings for {0} class must all " + "have consistent time scales." + .format(self.name)) + return [int(tm['year']), int(tm['mon']), int(tm['mday']), + int(tm.get('hour', 0)), int(tm.get('min', 0)), + float(tm.get('sec', 0.))] + + def format_string(self, str_fmt, **kwargs): + """Format time-string: append the scale to the normal ISOT format.""" + time_str = super(TimeFITS, self).format_string(str_fmt, **kwargs) + if self._fits_scale and self._fits_realization: + return '{0}({1}({2}))'.format(time_str, self._fits_scale, + self._fits_realization) + else: + return '{0}({1})'.format(time_str, self._scale.upper()) + + @property + def value(self): + """Convert times to strings, using signed 5 digit if necessary.""" + if 'long' not in self.out_subfmt: + # If we have times before year 0 or after year 9999, we can + # output only in a "long" format, using signed 5-digit years. + jd = self.jd1 + self.jd2 + if jd.min() < 1721425.5 or jd.max() >= 5373484.5: + self.out_subfmt = 'long' + self.out_subfmt + return super(TimeFITS, self).value + + +class TimeEpochDate(TimeFormat): + """ + Base class for support floating point Besselian and Julian epoch dates + """ + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # validate scale. + epoch_to_jd = getattr(erfa, self.epoch_to_jd) + jd1, jd2 = epoch_to_jd(val1 + val2) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + @property + def value(self): + jd_to_epoch = getattr(erfa, self.jd_to_epoch) + return jd_to_epoch(self.jd1, self.jd2) + + +class TimeBesselianEpoch(TimeEpochDate): + """Besselian Epoch year as floating point value(s) like 1950.0""" + name = 'byear' + epoch_to_jd = 'epb2jd' + jd_to_epoch = 'epb' + + def _check_val_type(self, val1, val2): + """Input value validation, typically overridden by derived classes""" + if hasattr(val1, 'to') and hasattr(val1, 'unit'): + raise ValueError("Cannot use Quantities for 'byear' format, " + "as the interpretation would be ambiguous. " + "Use float with Besselian year instead. ") + + return super(TimeBesselianEpoch, self)._check_val_type(val1, val2) + + +class TimeJulianEpoch(TimeEpochDate): + """Julian Epoch year as floating point value(s) like 2000.0""" + name = 'jyear' + unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities + epoch_to_jd = 'epj2jd' + jd_to_epoch = 'epj' + + +class TimeEpochDateString(TimeString): + """ + Base class to support string Besselian and Julian epoch dates + such as 'B1950.0' or 'J2000.0' respectively. + """ + + def set_jds(self, val1, val2): + epoch_prefix = self.epoch_prefix + iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double]) + for val, years in iterator: + time_str = val.item() + try: + epoch_type, year_str = time_str[0], time_str[1:] + year = float(year_str) + if epoch_type.upper() != epoch_prefix: + raise ValueError + except (IndexError, ValueError): + raise ValueError('Time {0} does not match {1} format' + .format(time_str, self.name)) + else: + years[...] = year + + self._check_scale(self._scale) # validate scale. 
+ epoch_to_jd = getattr(erfa, self.epoch_to_jd) + jd1, jd2 = epoch_to_jd(iterator.operands[-1]) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + @property + def value(self): + jd_to_epoch = getattr(erfa, self.jd_to_epoch) + years = jd_to_epoch(self.jd1, self.jd2) + # Use old-style format since it is a factor of 2 faster + str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f' + outs = [str_fmt % year for year in years.flat] + return np.array(outs).reshape(self.jd1.shape) + + +class TimeBesselianEpochString(TimeEpochDateString): + """Besselian Epoch year as string value(s) like 'B1950.0'""" + name = 'byear_str' + epoch_to_jd = 'epb2jd' + jd_to_epoch = 'epb' + epoch_prefix = 'B' + + +class TimeJulianEpochString(TimeEpochDateString): + """Julian Epoch year as string value(s) like 'J2000.0'""" + name = 'jyear_str' + epoch_to_jd = 'epj2jd' + jd_to_epoch = 'epj' + epoch_prefix = 'J' + + +class TimeDeltaFormatMeta(TimeFormatMeta): + _registry = TIME_DELTA_FORMATS + + +@six.add_metaclass(TimeDeltaFormatMeta) +class TimeDeltaFormat(TimeFormat): + """Base class for time delta representations""" + + def _check_scale(self, scale): + """ + Check that the scale is in the allowed list of scales, or is `None` + """ + if scale is not None and scale not in TIME_DELTA_SCALES: + raise ScaleValueError("Scale value '{0}' not in " + "allowed values {1}" + .format(scale, TIME_DELTA_SCALES)) + + return scale + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # Validate scale. + self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit) + + @property + def value(self): + return (self.jd1 + self.jd2) / self.unit + + +class TimeDeltaSec(TimeDeltaFormat): + """Time delta in SI seconds""" + name = 'sec' + unit = 1. / erfa.DAYSEC # for quantity input + + +class TimeDeltaJD(TimeDeltaFormat): + """Time delta in Julian days (86400 SI seconds)""" + name = 'jd' + unit = 1. 
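# ---------------------------------------------------------------------------
# Editor's note (illustrative only, not part of the vendored patch above):
# a minimal sketch of how the format classes defined in this module are
# reached through the public astropy.time API. Output values in the comments
# are approximate, and the sketch assumes an astropy release contemporary
# with this vendored copy (with its bundled ERFA wrappers).
from astropy.time import Time, TimeDelta

# TimeISO / TimeISOT / TimeYearDayTime are selected via format='iso'/'isot'/'yday'.
t = Time('2000-01-01 12:00:00.000', format='iso', scale='utc')
print(t.isot)       # e.g. '2000-01-01T12:00:00.000'
print(t.yday)       # e.g. '2000:001:12:00:00.000'

# TimeFITS parses (and round-trips) a trailing scale given in parentheses.
tf = Time('2000-01-01T12:00:00(TT)', format='fits')
print(tf.scale)     # 'tt'

# TimeJulianEpoch / TimeJulianEpochString: numeric and string epoch years.
print(t.jyear)      # ~2000.0 (Julian epoch year as a float)
print(t.jyear_str)  # e.g. 'J2000.000'

# TimeDeltaSec / TimeDeltaJD: 86400 SI seconds is one Julian day.
dt = TimeDelta(86400.0, format='sec')
print(dt.jd)        # 1.0
# ---------------------------------------------------------------------------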
+ + +from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fb12f2a2813d8a8a77cbde936886cd28a71879a Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/formats.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.py new file mode 100644 index 0000000000000000000000000000000000000000..3cd9f7c3d928c2b9a57845c6438b77d8ca63de27 --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b32a51399cd3bd66eadfb0730faf1b4cd448b4b Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/setup_package.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/__init__.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/__init__.pyc b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65cab842bdc1df5aff14817fa125e91e3bc095a6 Binary files /dev/null and b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/__init__.pyc differ diff --git a/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/test_basic.py b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..84ccb7152b0ccd707654c30f665c8a8507863a8f --- /dev/null +++ b/googleAppEngine/scipy/env/lib/python2.7/site-packages/astropy/time/tests/test_basic.py @@ -0,0 +1,1155 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import copy +import functools +import datetime +from copy import deepcopy + +import pytest +import numpy as np + +from ...tests.helper import catch_warnings, remote_data +from ...extern import six +from ...extern.six.moves import zip +from ...utils import isiterable +from .. import Time, ScaleValueError, TIME_SCALES, TimeString, TimezoneInfo +from ...coordinates import EarthLocation +from ... import units as u +from ... import _erfa as erfa +try: + import pytz + HAS_PYTZ = True +except ImportError: + HAS_PYTZ = False + +allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0) +allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52, + atol=2. ** -52) # 20 ps atol +allclose_sec = functools.partial(np.allclose, rtol=2. ** -52, + atol=2. ** -52 * 24 * 3600) # 20 ps atol +allclose_year = functools.partial(np.allclose, rtol=2. ** -52, + atol=0.) 
# 14 microsec at current epoch + + +def setup_function(func): + func.FORMATS_ORIG = deepcopy(Time.FORMATS) + + +def teardown_function(func): + Time.FORMATS.clear() + Time.FORMATS.update(func.FORMATS_ORIG) + + +class TestBasic(): + """Basic tests stemming from initial example and API reference""" + + def test_simple(self): + times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'] + t = Time(times, format='iso', scale='utc') + assert (repr(t) == "