| column | dtype | range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24 (nullable) |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24 (nullable) |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24 (nullable) |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
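The `size` column and the three trailing statistics columns are per-file values derived from `content`. The dump does not show the pipeline that produced them; a minimal sketch of the presumed definitions is:

```python
def file_stats(content: str) -> dict:
    """Recompute the derived per-file columns from a file's text (assumed definitions)."""
    lines = content.splitlines() or [""]
    return {
        "size": len(content),  # character count; equals byte count for ASCII source
        "avg_line_length": sum(map(len, lines)) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }
```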
hexsha: 444322f10b91e018e4a69d114b6a451fdf842aa0 | size: 1,427 | ext: py | lang: Python
max_stars: path=tests/supply_E3631A.py | repo=BlancaCC/granadaSat_batteries | head=9302feddddc62c1d26ca4faa7684edd0eeb95aa1 | licenses=["MIT"] | count=null | events=null to null
max_issues: path=tests/supply_E3631A.py | repo=BlancaCC/granadaSat_batteries | head=9302feddddc62c1d26ca4faa7684edd0eeb95aa1 | licenses=["MIT"] | count=null | events=null to null
max_forks: path=tests/supply_E3631A.py | repo=BlancaCC/granadaSat_batteries | head=9302feddddc62c1d26ca4faa7684edd0eeb95aa1 | licenses=["MIT"] | count=null | events=null to null
content:
class SupplyE3631A:
def __init__(self, instr):
if 'HEWLETT-PACKARD,E3631A' not in instr.query("*IDN?"):
raise NameError('Device is not HEWLETT-PACKARD E3631A')
else:
self.instr = instr
def output_on(self):
self.instr.write('OUTP ON')
def output_off(self):
self.instr.write('OUTP OFF')
def select_output(self, gpib):
if gpib == '+6V':
self.instr.write('INST P6V')
elif gpib == '+25V':
self.instr.write('INST P25V')
elif gpib == '-25V':
self.instr.write('INST N25V')
else:
raise NameError('Not an argument')
def limit_current(self, curr):
self.instr.write(f'CURR {curr}')
def set_voltage(self, volt):
self.instr.write(f'VOLT {volt}')
def current(self) -> 'Amperes':
return float(self.instr.query('MEAS:CURR?'))
def voltage(self) -> 'Volts':
return float(self.instr.query('MEAS:VOLT?'))
def write_screen(self, txt):
self.instr.write(f'DISP:TEXT "{txt}"')
if __name__ == '__main__':
import visa
rm = visa.ResourceManager()
instr = rm.open_resource('GPIB0::3::INSTR')
sup = SupplyE3631A(instr)
sup.output_off()
sup.limit_current(2.2)
sup.set_voltage(5.4)
sup.output_on()
while True:
print(f'Voltage: {sup.voltage()}, Current: {sup.current()}')
avg_line_length: 27.442308 | max_line_length: 68 | alphanum_fraction: 0.573231
hexsha: 3d9c00068e7b7a9d949ce589b44f27228e107591 | size: 268 | ext: py | lang: Python
max_stars: path=code/ui_database/simple.py | repo=evemorgen/PlasticMonkeysCansat | head=5f6f8d52ac65de408c7e8ff082c20bebcb40f5e4 | licenses=["MIT"] | count=10 | events=2018-10-16T15:53:09.000Z to 2020-08-19T06:06:23.000Z
max_issues: path=code/ui_database/simple.py | repo=evemorgen/PlasticMonkeysCansat | head=5f6f8d52ac65de408c7e8ff082c20bebcb40f5e4 | licenses=["MIT"] | count=23 | events=2018-10-13T16:00:43.000Z to 2019-04-27T19:08:58.000Z
max_forks: path=code/ui_database/simple.py | repo=evemorgen/PlasticMonkeysCansat | head=5f6f8d52ac65de408c7e8ff082c20bebcb40f5e4 | licenses=["MIT"] | count=2 | events=2018-11-04T17:55:53.000Z to 2018-11-18T17:33:27.000Z
content:
from tinydb import TinyDB, Query
db = TinyDB('questions.json')
q = Query()
#db.insert({'lan':'PL','num': 1, 'type':'general', 'question': 'Czy potrzebujesz pomocy'})
result = db.get((q.lan == 'PL') & (q.num == 1))
question = result.get('question')
print(question)
avg_line_length: 20.615385 | max_line_length: 90 | alphanum_fraction: 0.63806
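For context, the commented-out `db.insert(...)` line in the record above is what would seed `questions.json`. A self-contained sketch of the same TinyDB insert-and-query pattern (illustrative only, not part of the dataset record):

```python
from tinydb import TinyDB, Query

db = TinyDB('questions.json')  # creates the JSON file on first use
# seed one record, mirroring the commented-out insert in the snippet above
db.insert({'lan': 'PL', 'num': 1, 'type': 'general', 'question': 'Czy potrzebujesz pomocy'})

q = Query()
result = db.get((q.lan == 'PL') & (q.num == 1))  # first document matching both conditions, or None
print(result['question'] if result else 'no matching question')
```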
hexsha: 98980e58d4a8c9f21c9b33c0ea1f92394363e620 | size: 675 | ext: py | lang: Python
max_stars: path=backend/migrations/versions/0f846b00d0db_add_position_field.py | repo=cclauss/lineage | head=065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | licenses=["MIT"] | count=1 | events=2021-09-06T15:26:46.000Z to 2021-09-06T15:26:46.000Z
max_issues: path=backend/migrations/versions/0f846b00d0db_add_position_field.py | repo=cclauss/lineage | head=065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | licenses=["MIT"] | count=null | events=null to null
max_forks: path=backend/migrations/versions/0f846b00d0db_add_position_field.py | repo=cclauss/lineage | head=065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | licenses=["MIT"] | count=1 | events=2020-11-12T05:23:09.000Z to 2020-11-12T05:23:09.000Z
content:
"""Add position field
Revision ID: 0f846b00d0db
Revises: f0c6a1421c2d
Create Date: 2020-09-29 18:40:51.646237
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0f846b00d0db'
down_revision = 'f0c6a1421c2d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('timeline_node', sa.Column('position', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('timeline_node', 'position')
# ### end Alembic commands ###
avg_line_length: 23.275862 | max_line_length: 86 | alphanum_fraction: 0.699259
hexsha: 24f5414813e2f78f1568794763da4784c301b042 | size: 8,549 | ext: py | lang: Python
max_stars: path=tests/test_orders.py | repo=NYU-DevOps-Spring2018-Orders/orders | head=64e4f570871a66ce30d62e50977fc5d4a17cdeab | licenses=["Apache-2.0"] | count=2 | events=2018-06-25T06:39:47.000Z to 2018-10-29T17:09:33.000Z
max_issues: path=tests/test_orders.py | repo=NYU-DevOps-Spring2018-Orders/orders | head=64e4f570871a66ce30d62e50977fc5d4a17cdeab | licenses=["Apache-2.0"] | count=100 | events=2018-02-18T16:52:08.000Z to 2018-05-02T19:32:01.000Z
max_forks: path=tests/test_orders.py | repo=NYU-DevOps-Spring2018-Orders/orders | head=64e4f570871a66ce30d62e50977fc5d4a17cdeab | licenses=["Apache-2.0"] | count=4 | events=2018-03-03T03:37:27.000Z to 2018-04-11T22:11:47.000Z
content:
"""
Test cases for Order Model
Test cases can be run with:
nosetests
coverage report -m
"""
import os
import unittest
from app import app, db
from app.models import Item, Order, DataValidationError
from datetime import datetime
from werkzeug.exceptions import NotFound
DATABASE_URI = os.getenv('DATABASE_URI', 'mysql+pymysql://root@localhost:3306/development')
######################################################################
# T E S T C A S E S
######################################################################
class TestOrders(unittest.TestCase):
""" Test Cases for Orders """
@classmethod
def setUpClass(cls):
""" These run once per Test suite """
app.debug = False
# Set up the test database
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
Order.init_db()
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
def tearDown(self):
db.session.remove()
db.drop_all()
def test_create_an_order(self):
""" Create an order and assert that it exists """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
self.assertEqual(order.id, None)
self.assertEqual(order.customer_id, 1)
self.assertEqual(order.date, date)
self.assertEqual(order.status, 'processing')
def test_add_an_order(self):
""" Create an Order and add it to the database """
date = datetime.now()
orders = Order.all()
self.assertEqual(orders, [])
order = Order(customer_id=1, date=date, status ='processing')
self.assertEqual(order.id, None)
order.save()
self.assertEqual(order.id, 1)
orders = Order.all()
self.assertEqual(len(orders), 1)
def test_update_an_order(self):
""" Update an Order """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
self.assertEqual(order.id, 1)
order.shipped = False
order.save()
orders = Order.all()
self.assertEqual(len(orders), 1)
self.assertEqual(orders[0].status, 'processing')
def test_delete_an_order(self):
""" Delete an Order """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
self.assertEqual(len(Order.all()), 1)
order.delete()
self.assertEqual(len(Order.all()), 0)
def test_serialize_an_order(self):
""" Test serialization of an Order """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
data = order.serialize()
self.assertNotEqual(data, None)
self.assertIn('id', data)
self.assertEqual(data['id'], None)
self.assertIn('customer_id', data)
self.assertEqual(data['customer_id'], 1)
self.assertIn('date', data)
self.assertEqual(data['date'], date)
self.assertIn('status', data)
self.assertEqual(data['status'], 'processing')
def test_deserialize_an_order(self):
""" Test deserialization of an Order """
# using this format to match format from our html forms
date = "2018-04-23T11:11"
# we return a datetime object with a different format
date_resp = "2018-04-23 11:11:00"
data = {"id": 1, "customer_id": 1, "date": date, "status": 'processing'}
order = Order()
order.deserialize(data)
self.assertNotEqual(order, None)
self.assertEqual(order.id, None)
self.assertEqual(order.customer_id, 1)
self.assertEqual(str(order.date), date_resp)
self.assertEqual(order.status, 'processing')
def test_fetch_all_orders(self):
""" Test fetching all Orders """
date = datetime.now()
order = Order(customer_id=1, date=date, status='processing')
order.save()
order2 = Order(customer_id=2, date=date, status = 'processing')
order2.save()
Order.all()
self.assertEqual(len(Order.all()), 2)
def test_get_an_order(self):
""" Get an Order by id """
date = datetime.now()
date_converted = str(date.year) + "-" + str(date.month) + "-" + str(date.day) + "T" + \
str(date.hour) + ":" + str(date.minute)
date_converted = datetime.strptime(date_converted, "%Y-%m-%dT%H:%M")
order = Order(customer_id=1, date=date_converted, status = 'processing')
order.save()
def test_get_or_404(self):
""" Get_or_404 function with nonexistent ID """
self.assertRaises(NotFound, Order.get_or_404, 1)
def test_find_by_customer_id(self):
""" Find orders by customer_id """
date = datetime.now()
date_converted = str(date.year) + "-" + str(date.month) + "-" + str(date.day) + "T" + \
str(date.hour) + ":" + str(date.minute)
date = datetime.strptime(date_converted, "%Y-%m-%dT%H:%M")
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
order1 = Order.find_by_customer_id(order.customer_id)
self.assertEqual(order1[0].customer_id, order.customer_id)
self.assertEqual(order1[0].date, date)
def test_find_by_date(self):
""" Find orders by date """
date = datetime.now()
date_converted = str(date.year) + "-" + str(date.month) + "-" + str(date.day) + "T" + \
str(date.hour) + ":" + str(date.minute)
date = datetime.strptime(date_converted, "%Y-%m-%dT%H:%M")
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
order1 = Order(customer_id=2, date=date, status = 'processing')
order1.save()
order2 = Order.find_by_date(date_converted)
self.assertEqual(order2[0].customer_id, order.customer_id)
self.assertEqual(order2[0].status, order.status)
self.assertEqual(order2[0].date, order.date)
def test_find_by_status(self):
""" Find orders by status """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
order1 = Order(customer_id=2, date=date, status = 'processing')
order1.save()
order2 = Order.find_by_status('processing')
self.assertEqual(order2[0].customer_id, order.customer_id)
self.assertEqual(order2[0].status, order.status)
self.assertEqual(order2[0].date, order.date)
def test_non_dict_raises_error(self):
""" Pass invalid data structure deserialize """
data = [1,2,3]
order = Order()
with self.assertRaises(DataValidationError):
order.deserialize(data)
def test_invalid_key_raises_error(self):
""" Try to pass invalid key """
date = datetime.now()
data = {"id": 1, "date": date, "status": 'processing'}
with self.assertRaises(DataValidationError):
order = Order()
order.deserialize(data)
def test_repr(self):
""" Test that string representation is correct """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing')
order.save()
self.assertEqual(order.__repr__(), "<Order>")
def test_remove_all(self):
""" Tests removing Orders from the database """
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing').save()
order = Order(customer_id=2, date=date, status = 'processing').save()
order1 = Order()
order1 = order1.find_by_customer_id(1)[0]
order2 = Order()
order2 = order2.find_by_customer_id(2)[0]
item = Item(order_id=order1.id, product_id=1, name='hammer', quantity=1, price=11.50).save()
item = Item(order_id=order1.id, product_id=2, name='toilet paper', quantity=2, price=2.50).save()
item = Item(order_id=order2.id, product_id=3, name='beer', quantity=2, price=10.50).save()
Order.remove_all()
self.assertEqual(db.session.query(Order).count(), 0)
######################################################################
# M A I N
######################################################################
if __name__ == '__main__':
unittest.main()
avg_line_length: 34.333333 | max_line_length: 105 | alphanum_fraction: 0.587554
hexsha: 7960801277784409e8be96cd4e0e7304efeb6b75 | size: 3,169 | ext: py | lang: Python
max_stars: path=project_euler/problem_008/sol1.py | repo=NavpreetDevpuri/Python | head=7ef5ae66d777e8ed702993c6aa9270e0669cb0c6 | licenses=["MIT"] | count=145,614 | events=2016-07-21T05:40:05.000Z to 2022-03-31T22:17:22.000Z
max_issues: path=project_euler/problem_008/sol1.py | repo=Agha-Muqarib/Python | head=04f156a8973d6156a4357e0717d9eb0aa264d086 | licenses=["MIT"] | count=3,987 | events=2016-07-28T17:31:25.000Z to 2022-03-30T23:07:46.000Z
max_forks: path=project_euler/problem_008/sol1.py | repo=Agha-Muqarib/Python | head=04f156a8973d6156a4357e0717d9eb0aa264d086 | licenses=["MIT"] | count=40,014 | events=2016-07-26T15:14:41.000Z to 2022-03-31T22:23:03.000Z
content:
"""
Project Euler Problem 8: https://projecteuler.net/problem=8
Largest product in a series
The four adjacent digits in the 1000-digit number that have the greatest
product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the
greatest product. What is the value of this product?
"""
import sys
N = """73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450"""
def solution(n: str = N) -> int:
"""
Find the thirteen adjacent digits in the 1000-digit number n that have
the greatest product and returns it.
>>> solution("13978431290823798458352374")
609638400
>>> solution("13978431295823798458352374")
2612736000
>>> solution("1397843129582379841238352374")
209018880
"""
largest_product = -sys.maxsize - 1
for i in range(len(n) - 12):
product = 1
for j in range(13):
product *= int(n[i + j])
if product > largest_product:
largest_product = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
avg_line_length: 38.180723 | max_line_length: 74 | alphanum_fraction: 0.84443
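An equivalent formulation of the brute-force window scan in `sol1.py` above, using `math.prod` in place of the inner loop (illustrative rewrite, not part of the original file):

```python
import math

def largest_window_product(digits: str, window: int = 13) -> int:
    """Largest product of `window` adjacent digits in the digit string (same brute-force scan as sol1.py)."""
    values = [int(ch) for ch in digits]
    return max(math.prod(values[i:i + window]) for i in range(len(values) - window + 1))

# matches the first doctest of solution() above
assert largest_window_product("13978431290823798458352374") == 609638400
```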
hexsha: 4072e1ad1100f866ae38639a995a1ea0aec45aac | size: 80,384 | ext: py | lang: Python
max_stars: path=poppy/optics.py | repo=remorgan123/poppy | head=70f70a1ab96a88b602d67e02161dc8265f947bcc | licenses=["BSD-3-Clause"] | count=141 | events=2015-02-18T09:18:51.000Z to 2022-02-23T20:18:00.000Z
max_issues: path=poppy/optics.py | repo=ivalaginja/poppy | head=33c8857d64b5394375135ea9918e36417a76752c | licenses=["BSD-3-Clause"] | count=227 | events=2015-01-22T04:30:04.000Z to 2022-01-13T19:39:19.000Z
max_forks: path=poppy/optics.py | repo=shanosborne/poppy | head=2010aaac2e738ac347186e28b2258e489a22deec | licenses=["BSD-3-Clause"] | count=51 | events=2015-02-25T22:51:22.000Z to 2021-09-15T03:48:08.000Z
content:
import numpy as np
import scipy.special
import scipy.ndimage.interpolation
import matplotlib
import astropy.io.fits as fits
import astropy.units as u
import warnings
import logging
from . import utils
from . import conf
from . import accel_math
from .poppy_core import OpticalElement, Wavefront, BaseWavefront, PlaneType, _RADIANStoARCSEC
from .accel_math import _exp, _r, _float, _complex
from . import geometry
if accel_math._USE_NUMEXPR:
import numexpr as ne
_log = logging.getLogger('poppy')
__all__ = ['AnalyticOpticalElement', 'ScalarTransmission', 'InverseTransmission',
'BandLimitedCoron', 'BandLimitedCoronagraph', 'IdealFQPM', 'CircularPhaseMask', 'RectangularFieldStop', 'SquareFieldStop',
'AnnularFieldStop', 'HexagonFieldStop',
'CircularOcculter', 'BarOcculter', 'FQPM_FFT_aligner', 'CircularAperture',
'HexagonAperture', 'MultiHexagonAperture', 'NgonAperture', 'RectangleAperture',
'SquareAperture', 'SecondaryObscuration', 'AsymmetricSecondaryObscuration',
'ThinLens', 'GaussianAperture', 'KnifeEdge', 'CompoundAnalyticOptic', 'fixed_sampling_optic']
# ------ Generic Analytic elements -----
class AnalyticOpticalElement(OpticalElement):
""" Defines an abstract analytic optical element, i.e. one definable by
some formula rather than by an input OPD or pupil file.
This class is useless on its own; instead use its various subclasses
that implement appropriate get_opd and/or get_transmission functions.
It exists mostly to provide some behaviors & initialization common to
all analytic optical elements.
Parameters
----------
name, verbose, oversample, planetype : various
Same as for OpticalElement
transmission, opd : string
These are *not allowed* for Analytic optical elements, and this class will raise an
error if you try to set one.
shift_x, shift_y : Optional floats
Translations of this optic, given in meters relative to the optical
axis for pupil plane elements, or arcseconds relative to the optical axis
for image plane elements.
rotation : Optional float
Rotation of the optic around its center, given in degrees
counterclockwise. Note that if you apply both shift and rotation,
the optic rotates around its own center, rather than the optical
axis.
"""
def __init__(self, shift_x=None, shift_y=None, rotation=None,
inclination_x=None, inclination_y=None,
**kwargs):
OpticalElement.__init__(self, **kwargs)
if shift_x is not None: self.shift_x = shift_x
if shift_y is not None: self.shift_y = shift_y
if rotation is not None: self.rotation = rotation
if inclination_x is not None: self.inclination_x = inclination_x
if inclination_y is not None: self.inclination_y = inclination_y
if getattr(self, 'inclination_x', 0) != 0 and getattr(self, 'inclination_y', 0) != 0:
warnings.warn("It is physically inconsistent to set inclinations on both X and Y at the same time.")
if np.abs(getattr(self, 'inclination_x', 0)) > 90 or np.abs(getattr(self, 'inclination_y', 0)) > 90:
warnings.warn("Inclinations should be within the range -90 to 90 degrees")
# self.shape = None # no explicit shape required
self.pixelscale = None
@property
def shape(self): # Analytic elements don't have shape
return None
def __str__(self):
if self.planetype == PlaneType.pupil:
return "Pupil plane: " + self.name
elif self.planetype == PlaneType.image:
return "Image plane: " + self.name
else:
return "Optic: " + self.name
# The following two functions should be replaced by derived subclasses
# but we provide a default of perfect transmission and zero OPD.
# Each must return something which is a numpy ndarray.
def get_opd(self, wave):
return np.zeros(wave.shape, dtype=_float())
def get_transmission(self, wave):
""" Note that this is the **amplitude** transmission, not the
total intensity transmission. """
return np.ones(wave.shape, dtype=_float())
# noinspection PyUnusedLocal
def get_phasor(self, wave):
""" Compute a complex phasor from an OPD, given a wavelength.
The returned value should be the complex phasor array as appropriate for
multiplying by the wavefront amplitude.
Parameters
----------
wave : float or obj
either a scalar wavelength or a Wavefront object
"""
if isinstance(wave, BaseWavefront):
wavelength = wave.wavelength
else:
wavelength = wave
scale = 2. * np.pi / wavelength.to(u.meter).value
if accel_math._USE_NUMEXPR:
trans = self.get_transmission(wave)
opd = self.get_opd(wave)
# we first multiply the two scalars, for a slight performance gain
scalars = 1.j * scale
# warning, numexpr exp is crash-prone if fed complex64, so we
# leave the scalars variable as np.complex128 for reliability
result = ne.evaluate("trans * exp( opd * scalars)")
# TODO if single-precision, need to cast the result back to that
# to work around a bug
# Not sure why numexpr is casting up to complex128
# see https://github.com/pydata/numexpr/issues/155
# (Yes this is inefficient to do math as doubles if in single mode, but
# numexpr is still a net win)
if conf.double_precision:
return result
else:
return np.asarray(result, _complex())
else:
return self.get_transmission(wave) * np.exp(1.j * self.get_opd(wave) * scale)
@utils.quantity_input(wavelength=u.meter)
def sample(self, wavelength=1e-6 * u.meter, npix=512, grid_size=None, what='amplitude',
return_scale=False, phase_unit='waves'):
""" Sample the Analytic Optic onto a grid and return the array
Parameters
----------
wavelength : astropy.units.Quantity or float
Wavelength (in meters if unit not given explicitly)
npix : integer
Number of pixels for sampling the array
grid_size : float
Field of view grid size (diameter) for sampling the optic, in meters for
pupil plane optics and arcseconds for image planes. Default value is
taken from the optic's properties, if defined. Otherwise defaults to
6.5 meters or 2 arcseconds depending on plane.
what : string
What to return: optic 'amplitude' transmission, 'intensity' transmission,
'phase', or 'opd'. Note that optical path difference, OPD, is given in meters.
phase_unit : string
Unit for returned phase array IF what=='phase'. One of 'radians', 'waves', 'meters'.
('meters' option is deprecated; use what='opd' instead.)
return_scale : float
if True, will return a tuple containing the desired array and a float giving the
pixel scale.
"""
if self.planetype != PlaneType.image:
if grid_size is not None:
diam = grid_size if isinstance(grid_size, u.Quantity) else grid_size * u.meter
elif hasattr(self, '_default_display_size'):
diam = self._default_display_size
elif hasattr(self, 'pupil_diam'):
diam = self.pupil_diam * 1
else:
diam = 1.0 * u.meter
w = Wavefront(wavelength=wavelength, npix=npix, diam=diam)
pixel_scale = diam / (npix * u.pixel)
else:
if grid_size is not None:
fov = grid_size if isinstance(grid_size, u.Quantity) else grid_size * u.arcsec
elif hasattr(self, '_default_display_size'):
fov = self._default_display_size
else:
fov = 4 * u.arcsec
pixel_scale = fov / (npix * u.pixel)
w = Wavefront(wavelength=wavelength, npix=npix, pixelscale=pixel_scale)
_log.info("Computing {0} for {1} sampled onto {2} pixel grid with pixelscale {3}".format(what, self.name, npix, pixel_scale))
if what == 'amplitude':
output_array = self.get_transmission(w)
elif what == 'intensity':
output_array = self.get_transmission(w) ** 2
elif what == 'phase':
if phase_unit == 'radians':
                output_array = self.get_opd(w) * 2 * np.pi / wavelength  # OPD in meters converted to phase in radians
elif phase_unit == 'waves':
output_array = self.get_opd(w) / wavelength
elif phase_unit == 'meters':
warnings.warn("'phase_unit' parameter has been deprecated. Use what='opd' instead.",
category=DeprecationWarning)
output_array = self.get_opd(w)
else:
warnings.warn("'phase_unit' parameter has been deprecated. Use what='opd' instead.",
category=DeprecationWarning)
raise ValueError('Invalid/unknown phase_unit: {}. Must be one of '
'[radians, waves, meters]'.format(phase_unit))
elif what == 'opd':
output_array = self.get_opd(w)
elif what == 'complex':
output_array = self.get_phasor(w)
else:
raise ValueError('Invalid/unknown what to sample: {}. Must be one of '
'[amplitude, intensity, phase, opd, complex]'.format(what))
if return_scale:
return output_array, pixel_scale
else:
return output_array
@utils.quantity_input(wavelength=u.meter)
def to_fits(self, outname=None, what='amplitude', wavelength=1e-6 * u.meter, npix=512, **kwargs):
""" Save an analytic optic computed onto a grid to a FITS file
The FITS file is returned to the calling function, and may optionally be
saved directly to disk.
Parameters
------------
what : string
What quantity to save. See the sample function of this class
wavelength : float
Wavelength in meters.
npix : integer
Number of pixels.
outname : string, optional
Filename to write out a FITS file to disk
See the sample() function for additional optional parameters.
"""
try:
from .version import version
except ImportError:
version = ''
kwargs['return_scale'] = True
if what == 'complex':
raise ValueError("FITS cannot handle complex arrays directly. Save the amplitude and opd separately.")
output_array, pixelscale = self.sample(wavelength=wavelength, npix=npix, what=what,
**kwargs)
long_contents = {'amplitude': "Electric field amplitude transmission",
'intensity': "Electric field intensity transmission",
'opd': "Optical path difference",
'phase': "Wavefront phase delay"}
phdu = fits.PrimaryHDU(output_array)
phdu.header['OPTIC'] = (self.name, "Descriptive name of this optic")
phdu.header['NAME'] = self.name
phdu.header['SOURCE'] = 'Computed with POPPY'
phdu.header['VERSION'] = (version, "software version of POPPY")
phdu.header['CONTENTS'] = (what, long_contents[what])
phdu.header['PLANETYP'] = (self.planetype.value, "0=unspecified, 1=pupil, 2=image, 3=detector, 4=rot")
if self.planetype == PlaneType.image:
phdu.header['PIXELSCL'] = (pixelscale.to(u.arcsec / u.pixel).value, 'Image plane pixel scale in arcsec/pix')
            phdu.header['PIXUNIT'] = ('arcsecond', "Unit for PIXELSCL")
else:
phdu.header['PUPLSCAL'] = (pixelscale.to(u.meter / u.pixel).value, 'Pupil plane pixel scale in meter/pix')
phdu.header['PIXELSCL'] = (phdu.header['PUPLSCAL'], 'Pupil plane pixel scale in meter/pix')
phdu.header['PIXUNIT'] = ('meter', "Unit for PIXELSCL")
if what == 'opd':
phdu.header['BUNIT'] = ('meter', "Optical Path Difference is given in meters.")
if hasattr(self, 'shift_x'):
phdu.header['SHIFTX'] = (self.shift_x, "X axis shift of input optic")
if hasattr(self, 'shift_y'):
phdu.header['SHIFTY'] = (self.shift_y, "Y axis shift of input optic")
if hasattr(self, 'rotation'):
phdu.header['ROTATION'] = (self.rotation, "Rotation of input optic, in deg")
hdul = fits.HDUList(hdus=[phdu])
if outname is not None:
phdu.writeto(outname, overwrite=True)
_log.info("Output written to " + outname)
return hdul
def get_coordinates(self, wave):
"""Get coordinates of this optic, optionally including shifts
Method: Calls the supplied wave object's coordinates() method,
then checks for the existence of the following attributes:
"shift_x", "shift_y", "rotation", "inclination_x", "inclination_y"
If any of them are present, then the coordinates are modified accordingly.
Shifts are given in meters for pupil optics and arcseconds for image
optics. Rotations and inclinations are given in degrees.
For multiple transformations, the order of operations is:
shift, rotate, incline.
"""
y, x = wave.coordinates()
if hasattr(self, "shift_x"):
x -= float(self.shift_x)
if hasattr(self, "shift_y"):
y -= float(self.shift_y)
if hasattr(self, "rotation"):
angle = np.deg2rad(self.rotation)
xp = np.cos(angle) * x + np.sin(angle) * y
yp = -np.sin(angle) * x + np.cos(angle) * y
x = xp
y = yp
# inclination around X axis rescales Y, and vice versa:
if hasattr(self, "inclination_x"):
y /= np.cos(np.deg2rad(self.inclination_x))
if hasattr(self, "inclination_y"):
x /= np.cos(np.deg2rad(self.inclination_y))
return y, x
class ScalarTransmission(AnalyticOpticalElement):
""" Uniform transmission between 0 and 1.0 in intensity.
Either a null optic (empty plane) or some perfect ND filter...
But most commonly this is just used as a null optic placeholder """
def __init__(self, name=None, transmission=1.0, **kwargs):
if name is None:
name = ("-empty-" if transmission == 1.0 else
"Scalar Transmission of {0}".format(transmission))
AnalyticOpticalElement.__init__(self, name=name, **kwargs)
self.transmission = float(transmission)
self.wavefront_display_hint = 'intensity'
def get_transmission(self, wave):
res = np.empty(wave.shape, dtype=_float())
res.fill(self.transmission)
return res
class InverseTransmission(AnalyticOpticalElement):
""" Given any arbitrary OpticalElement with transmission T(x,y)
return the inverse transmission 1 - T(x,y)
This is a useful ingredient in the SemiAnalyticCoronagraph algorithm.
"""
def __init__(self, optic=None):
super(InverseTransmission, self).__init__()
if optic is None or not hasattr(optic, 'get_transmission'):
raise ValueError("Need to supply an valid optic to invert!")
self.uninverted_optic = optic
self.name = "1 - " + optic.name
self.planetype = optic.planetype
self.pixelscale = optic.pixelscale
self.oversample = optic.oversample
if hasattr(self.uninverted_optic, '_default_display_size'):
self._default_display_size = self.uninverted_optic._default_display_size
@property
def shape(self): # override parent class shape function
return self.uninverted_optic.shape
def get_transmission(self, wave):
return 1 - self.uninverted_optic.get_transmission(wave)
def get_opd(self, wave):
return self.uninverted_optic.get_opd(wave)
def display(self, **kwargs):
if isinstance(self.uninverted_optic, AnalyticOpticalElement):
AnalyticOpticalElement.display(self, **kwargs)
else:
OpticalElement.display(self, **kwargs)
# ------ Analytic Image Plane elements (coordinates in arcsec) -----
class AnalyticImagePlaneElement(AnalyticOpticalElement):
""" Parent virtual class for AnalyticOptics which are
dimensioned in angular units such as arcseconds, rather
than physical length units such as meters.
"""
def __init__(self, name='Generic image plane optic', *args, **kwargs):
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.image, *args, **kwargs)
self.wavefront_display_hint = 'intensity' # preferred display for wavefronts at this plane
class BandLimitedCoronagraph(AnalyticImagePlaneElement):
""" Defines an ideal band limited coronagraph occulting mask.
Parameters
----------
name : string
Descriptive name
kind : string
Either 'circular' or 'linear'. The linear ones are custom shaped to NIRCAM's design
with flat bits on either side of the linear tapered bit.
Also includes options 'nircamcircular' and 'nircamwedge' specialized for the
JWST NIRCam occulters, including the off-axis ND acq spots and the changing
width of the wedge occulter.
sigma : float
The numerical size parameter, as specified in Krist et al. 2009 SPIE
wavelength : float
Wavelength this BLC is optimized for, only for the linear ones.
"""
allowable_kinds = ['circular', 'linear']
""" Allowable types of BLC supported by this class"""
@utils.quantity_input(wavelength=u.meter)
def __init__(self, name="unnamed BLC", kind='circular', sigma=1, wavelength=None, **kwargs):
AnalyticImagePlaneElement.__init__(self, name=name, **kwargs)
self.kind = kind.lower() # either circular or linear
if self.kind in ['nircamwedge', 'nircamcircular']:
import warnings
warnings.warn('JWST NIRCam specific functionality in poppy.BandLimitedCoron is moving to ' +
'webbpsf.NIRCam_BandLimitedCoron. The "nircamwedge" and "nircamcircular" options ' +
'in poppy will be removed in a future version of poppy.', DeprecationWarning)
elif self.kind not in self.allowable_kinds:
raise ValueError("Invalid value for kind of BLC: " + self.kind)
self.sigma = float(sigma) # size parameter. See section 2.1 of Krist et al. SPIE 2007, 2009
if wavelength is not None:
self.wavelength = float(wavelength) # wavelength, for selecting the
# linear wedge option only
self._default_display_size = 20. * u.arcsec # default size for onscreen display, sized for NIRCam
def get_transmission(self, wave):
""" Compute the amplitude transmission appropriate for a BLC for some given pixel spacing
corresponding to the supplied Wavefront.
Based on the Krist et al. SPIE paper on NIRCam coronagraph design
Note that the equations in Krist et al specify the intensity transmission of the occulter,
but what we want to return here is the amplitude transmittance. That is the square root
of the intensity, of course, so the equations as implemented here all differ from those
written in Krist's SPIE paper by lacking an exponential factor of 2. Thanks to John Krist
for pointing this out.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("BLC get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
if self.kind == 'circular':
# larger sigma implies narrower peak? TBD verify if this is correct
#
r = _r(x, y)
sigmar = self.sigma * r
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (2 * scipy.special.jn(1, sigmar) / sigmar) ** 2)
self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule)
elif self.kind == 'nircamcircular':
# larger sigma implies narrower peak? TBD verify if this is correct
#
r = _r(x, y)
sigmar = self.sigma * r
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (2 * scipy.special.jn(1, sigmar) / sigmar) ** 2)
# add in the ND squares. Note the positions are not exactly the same in the two wedges.
# See the figures in Krist et al. of how the 6 ND squares are spaced among the 5
            # coronagraph regions
# Also add in the opaque border of the coronagraph mask holder.
if self.sigma > 4:
# MASK210R has one in the corner and one half in the other corner
wnd = np.where(
(y > 5) &
(
((x < -5) & (x > -10)) |
((x > 7.5) & (x < 12.5))
)
)
wborder = np.where((np.abs(y) > 10) | (x < -10)) # left end of mask holder
else:
# the others have two halves on in each corner.
wnd = np.where(
(y > 5) &
(np.abs(x) > 7.5) &
(np.abs(x) < 12.5)
)
wborder = np.where(np.abs(y) > 10)
self.transmission[wnd] = np.sqrt(1e-3)
self.transmission[wborder] = 0
self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule)
elif self.kind == 'linear':
sigmar = self.sigma * np.abs(y)
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (np.sin(sigmar) / sigmar) ** 2)
elif self.kind == 'nircamwedge':
# This is hard-coded to the wedge-plus-flat-regions shape for NIRCAM
# we want a scale factor that goes from 2 to 6 with 1/5th of it as a fixed part on
# either end
# scalefact = np.linspace(1,7, x.shape[1]).clip(2,6)
            # the scale factor should depend on X coord in arcsec, scaling across a 20 arcsec FOV.
# map flat regions to 2.5 arcsec each?
# map -7.5 to 2, +7.5 to 6. slope is 4/15, offset is +9.5
scalefact = (2 + (-x + 7.5) * 4 / 15).clip(2, 6)
# scalefact *= self.sigma / 2 #;2.2513
# scalefact *= 2.2513
# scalefact.shape = (1, x.shape[1])
# This does not work - shape appears to be curved not linear.
# This is NOT a linear relationship. See calc_blc_wedge in test_poppy.
if np.abs(self.wavelength - 2.1e-6) < 0.1e-6:
polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01,
-1.00877701e+00, 5.72538509e+00, -2.12943497e+01,
5.18745152e+01, -7.97815606e+01, 7.02728734e+01])
elif np.abs(self.wavelength - 4.6e-6) < 0.1e-6:
polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02,
-4.59674047e-01, 2.60963397e+00, -9.70881273e+00,
2.36585911e+01, -3.63978587e+01, 3.20703511e+01])
else:
                raise NotImplementedError("No defined NIRCam wedge BLC mask for that wavelength?")
sigmas = scipy.poly1d(polyfitcoeffs)(scalefact)
sigmar = sigmas * np.abs(y)
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (np.sin(sigmar) / sigmar) ** 2)
# the bar should truncate at +- 10 arcsec:
woutside = np.where(np.abs(x) > 10)
self.transmission[woutside] = 1.0
# add in the ND squares. Note the positions are not exactly the same in the two wedges.
# See the figures in Krist et al. of how the 6 ND squares are spaced among the 5
            # coronagraph regions. Also add in the opaque border of the coronagraph mask holder.
if np.abs(self.wavelength - 2.1e-6) < 0.1e-6:
# half ND square on each side
wnd = np.where(
(y > 5) &
(
((x < -5) & (x > -10)) |
((x > 7.5) & (x < 12.5))
)
)
wborder = np.where(np.abs(y) > 10)
elif np.abs(self.wavelength - 4.6e-6) < 0.1e-6:
wnd = np.where(
(y > 5) &
(
((x < -7.5) & (x > -12.5)) |
(x > 5)
)
)
wborder = np.where((np.abs(y) > 10) | (x > 10)) # right end of mask holder
self.transmission[wnd] = np.sqrt(1e-3)
self.transmission[wborder] = 0
if not np.isfinite(self.transmission.sum()):
_log.warning("There are NaNs in the BLC mask - correcting to zero. (DEBUG LATER?)")
self.transmission[np.where(np.isfinite(self.transmission) == False)] = 0
return self.transmission
BandLimitedCoron=BandLimitedCoronagraph # Back compatibility for old name.
class IdealFQPM(AnalyticImagePlaneElement):
""" Defines an ideal 4-quadrant phase mask coronagraph, with its retardance
set perfectly to 0.5 waves at one specific wavelength and varying linearly on
either side of that. "Ideal" in the sense of ignoring chromatic effects other
than just the direct scaling of the wavelength.
Parameters
----------
name : string
Descriptive name
wavelength : float
Wavelength in meters for which the FQPM was designed, and at which there
is exactly 1/2 a wave of retardance.
"""
@utils.quantity_input(wavelength=u.meter)
def __init__(self, name="unnamed FQPM ", wavelength=10.65e-6 * u.meter, **kwargs):
AnalyticImagePlaneElement.__init__(self, **kwargs)
self.name = name
self.central_wavelength = wavelength
def get_opd(self, wave):
""" Compute the OPD appropriate for a 4QPM for some given pixel spacing
corresponding to the supplied Wavefront
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("4QPM get_opd must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
phase = (1 - np.sign(x) * np.sign(y)) * 0.25
return phase * self.central_wavelength.to(u.meter).value
class CircularPhaseMask(AnalyticImagePlaneElement):
""" Circular phase mask coronagraph, with its retardance
set perfectly at one specific wavelength and varying linearly on
either side of that.
Parameters
----------
name : string
Descriptive name
radius : float
Radius of the mask
wavelength : float
Wavelength in meters for which the phase mask was designed
retardance : float
Optical path delay at that wavelength, specified in waves
        relative to the reference wavelength. Default is 0.5.
"""
@utils.quantity_input(radius=u.arcsec, wavelength=u.meter)
def __init__(self, name=None, radius=1*u.arcsec, wavelength=1e-6 * u.meter, retardance=0.5,
**kwargs):
if name is None:
name = "Phase mask r={:.3g}".format(radius)
AnalyticImagePlaneElement.__init__(self, name=name, **kwargs)
self.wavefront_display_hint = 'phase' # preferred display for wavefronts at this plane
self._default_display_size = 4*radius
self.central_wavelength = wavelength
self.radius = radius
self.retardance = retardance
def get_opd(self, wave):
""" Compute the OPD appropriate for that phase mask for some given pixel spacing
corresponding to the supplied Wavefront
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_opd must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
r = _r(x, y)
self.opd= np.zeros(wave.shape, dtype=_float())
radius = self.radius.to(u.arcsec).value
self.opd[r <= radius] = self.retardance * self.central_wavelength.to(u.meter).value
npix = (r<=radius).sum()
if npix < 50: # pragma: no cover
import warnings
errmsg = "Phase mask is very coarsely sampled: only {} pixels. "\
"Improve sampling for better precision!".format(npix)
warnings.warn(errmsg)
_log.warn(errmsg)
return self.opd
class RectangularFieldStop(AnalyticImagePlaneElement):
""" Defines an ideal rectangular field stop
Parameters
----------
name : string
Descriptive name
width, height: float
        Width and height of the field stop, in arcseconds. Defaults: width 0.5, height 5.0.
"""
@utils.quantity_input(width=u.arcsec, height=u.arcsec)
def __init__(self, name="unnamed field stop", width=0.5*u.arcsec, height=5.0*u.arcsec, **kwargs):
AnalyticImagePlaneElement.__init__(self, **kwargs)
self.name = name
self.width = width # width of square stop in arcseconds.
self.height = height # height of square stop in arcseconds.
self._default_display_size = max(height, width) * 1.2
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the field stop.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("IdealFieldStop get_transmission must be called with a Wavefront "
"to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
w_outside = np.where(
(abs(y) > (self.height.to(u.arcsec).value / 2)) |
(abs(x) > (self.width.to(u.arcsec).value / 2))
)
del x # for large arrays, cleanup very promptly, before allocating self.transmission
del y
self.transmission = np.ones(wave.shape, dtype=_float())
self.transmission[w_outside] = 0
return self.transmission
class SquareFieldStop(RectangularFieldStop):
""" Defines an ideal square field stop
Parameters
----------
name : string
Descriptive name
size : float
Size of the field stop, in arcseconds. Default 20.
"""
@utils.quantity_input(size=u.arcsec)
def __init__(self, name="unnamed field stop", size=20.*u.arcsec, **kwargs):
RectangularFieldStop.__init__(self, width=size, height=size, **kwargs)
self.name = name
self.height = self.width
self._default_display_size = size * 1.2
class HexagonFieldStop(AnalyticImagePlaneElement):
""" Defines an ideal hexagonal field stop
Specify either the side length (= corner radius) or the
flat-to-flat distance, or the point-to-point diameter, in
angular units
Parameters
----------
name : string
Descriptive name
side : float, optional
side length (and/or radius) of hexagon, in arcsec. Overrides flattoflat if both are present.
flattoflat : float, optional
Distance between sides (flat-to-flat) of the hexagon, in arcsec. Default is 1.0
diameter : float, optional
point-to-point diameter of hexagon. Twice the side length. Overrides flattoflat, but is overridden by side.
Note you can also specify the standard parameter "rotation" to rotate the hexagon by some amount.
"""
@utils.quantity_input(side=u.arcsec, diameter=u.arcsec, flattoflat=u.arcsec)
def __init__(self, name=None, side=None, diameter=None, flattoflat=None, **kwargs):
if flattoflat is None and side is None and diameter is None:
self.side = 1.0 * u.arcsec
elif side is not None:
self.side = side
elif diameter is not None:
self.side = diameter / 2
else:
self.side = flattoflat / np.sqrt(3.)
if name is None:
name = "Hexagon, side length= {}".format(self.side)
AnalyticImagePlaneElement.__init__(self, name=name, **kwargs)
@property
def diameter(self):
return self.side * 2
@property
def flat_to_flat(self):
return self.side * np.sqrt(3.)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("HexagonFieldStop get_transmission must be called with a Wavefront "
"to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
side = self.side.to(u.arcsec).value
absy = np.abs(y)
self.transmission = np.zeros(wave.shape, dtype=_float())
w_rect = np.where(
(np.abs(x) <= 0.5 * side) &
(absy <= np.sqrt(3) / 2 * side)
)
w_left_tri = np.where(
(x <= -0.5 * side) &
(x >= -1 * side) &
(absy <= (x + 1 * side) * np.sqrt(3))
)
w_right_tri = np.where(
(x >= 0.5 * side) &
(x <= 1 * side) &
(absy <= (1 * side - x) * np.sqrt(3))
)
self.transmission[w_rect] = 1
self.transmission[w_left_tri] = 1
self.transmission[w_right_tri] = 1
return self.transmission
class AnnularFieldStop(AnalyticImagePlaneElement):
""" Defines a circular field stop with an (optional) opaque circular center region
Parameters
------------
name : string
Descriptive name
radius_inner : float
Radius of the central opaque region, in arcseconds. Default is 0.0 (no central opaque spot)
radius_outer : float
        Radius of the circular field stop outer edge, in arcseconds. Default is 1.0. Set to 0.0 for no outer edge.
"""
@utils.quantity_input(radius_inner=u.arcsec, radius_outer=u.arcsec)
def __init__(self, name="unnamed annular field stop", radius_inner=0.0, radius_outer=1.0, **kwargs):
AnalyticImagePlaneElement.__init__(self, **kwargs)
self.name = name
self.radius_inner = radius_inner
self.radius_outer = radius_outer
self._default_display_size = 2* max(radius_outer, radius_inner)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the field stop.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
r = _r(x, y)
radius_inner = self.radius_inner.to(u.arcsec).value
radius_outer = self.radius_outer.to(u.arcsec).value
pxscl = wave.pixelscale.to(u.arcsec/u.pixel).value
ypix=y/pxscl # The filled_circle_aa code and in particular pxwt doesn't seem reliable with pixel scale <1
xpix=x/pxscl
if self.radius_outer > 0:
self.transmission = geometry.filled_circle_aa(wave.shape, 0,0, radius_outer/pxscl, xarray=xpix, yarray=ypix)
else:
self.transmission = np.ones(wave.shape, dtype=_float())
if self.radius_inner > 0:
self.transmission -= geometry.filled_circle_aa(wave.shape, 0,0, radius_inner/pxscl, xarray=xpix, yarray=ypix)
return self.transmission
class CircularOcculter(AnnularFieldStop):
""" Defines an ideal circular occulter (opaque circle)
Parameters
----------
name : string
Descriptive name
radius : float
Radius of the occulting spot, in arcseconds. Default is 1.0
"""
@utils.quantity_input(radius=u.arcsec)
def __init__(self, name="unnamed occulter", radius=1.0, **kwargs):
super(CircularOcculter, self).__init__(name=name, radius_inner=radius, radius_outer=0.0, **kwargs)
self._default_display_size = 10 * u.arcsec
class BarOcculter(AnalyticImagePlaneElement):
""" Defines an ideal bar occulter (like in MIRI's Lyot coronagraph)
Parameters
----------
name : string
Descriptive name
width : float
width of the bar stop, in arcseconds. Default is 1.0
height: float
        height of the bar stop, in arcseconds. Default is 10.0
"""
@utils.quantity_input(width=u.arcsec, height=u.arcsec)
def __init__(self, name="bar occulter", width=1.0*u.arcsec, height=10.0*u.arcsec, **kwargs):
AnalyticImagePlaneElement.__init__(self, **kwargs)
self.name = name
self.width = width
self.height= height
self._default_display_size = max(height, width) * 1.2
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
w_inside = np.where( (np.abs(x) <= self.width.to(u.arcsec).value / 2) &
(np.abs(y) <= self.height.to(u.arcsec).value / 2) )
self.transmission = np.ones(wave.shape, dtype=_float())
self.transmission[w_inside] = 0
return self.transmission
# ------ Analytic Pupil or Intermediate Plane elements (coordinates in meters) -----
class FQPM_FFT_aligner(AnalyticOpticalElement):
""" Helper class for modeling FQPMs accurately
Adds (or removes) a slight wavelength- and pixel-scale-dependent tilt
to a pupil wavefront, to ensure the correct alignment of the image plane
FFT'ed PSF with the desired quad pixel alignment for the FQPM.
This is purely a computational convenience tool to work around the
pixel coordinate restrictions imposed by the FFT algorithm,
not a representation of any physical optic.
Parameters
----------
direction : string
'forward' or 'backward'
"""
def __init__(self, name="FQPM FFT aligner", direction='forward', **kwargs):
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
direction = direction.lower()
if direction != 'forward' and direction != 'backward':
raise ValueError("Invalid direction %s, must be either"
"forward or backward." % direction)
self.direction = direction
self._suppress_display = True
self.wavefront_display_hint = 'phase' # preferred display for wavefronts at this plane
def get_opd(self, wave):
""" Compute the required tilt needed to get the PSF centered on the corner between
the 4 central pixels, not on the central pixel itself.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("FQPM get_opd must be called with a Wavefront to define the spacing")
assert wave.planetype != PlaneType.image, "This optic does not work on image planes"
fft_im_pixelscale = wave.wavelength / wave.diam / wave.oversample * u.radian
required_offset = -fft_im_pixelscale * 0.5
if self.direction == 'backward':
required_offset *= -1
wave._image_centered = 'pixel'
else:
wave._image_centered = 'corner'
wave.tilt(required_offset, required_offset)
# gotta return something... so return a value that will not affect the wave any more.
return 0 # null OPD
class ParityTestAperture(AnalyticOpticalElement):
""" Defines a circular pupil aperture with boxes cut out.
This is mostly a test aperture, which has no symmetry and thus can be used to
test the various Fourier transform algorithms and sign conventions.
Parameters
----------
name : string
Descriptive name
radius : float
Radius of the pupil, in meters. Default is 1.0
pad_factor : float, optional
Amount to oversize the wavefront array relative to this pupil.
This is in practice not very useful, but it provides a straightforward way
of verifying during code testing that the amount of padding (or size of the circle)
does not make any numerical difference in the final result.
"""
@utils.quantity_input(radius=u.meter)
def __init__(self, name=None, radius=1.0 * u.meter, pad_factor=1.0, **kwargs):
if name is None: name = "Asymmetric Parity Test Aperture, radius={}".format(radius)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
self.radius = radius
# for creating input wavefronts - let's pad a bit:
self.pupil_diam = pad_factor * 2 * self.radius
self.wavefront_display_hint = 'intensity' # preferred display for wavefronts at this plane
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("CircularAperture get_opd must be called with a Wavefront "
"to define the spacing")
assert (wave.planetype != PlaneType.image)
radius = self.radius.to(u.meter).value
y, x = self.get_coordinates(wave)
r = _r(x, y)
w_outside = np.where(r > radius)
self.transmission = np.ones(wave.shape, dtype=_float())
self.transmission[w_outside] = 0
w_box1 = np.where(
(r > (radius * 0.5)) &
(np.abs(x) < radius * 0.1) &
(y < 0)
)
w_box2 = np.where(
(r > (radius * 0.75)) &
(np.abs(y) < radius * 0.2) &
(x < 0)
)
self.transmission[w_box1] = 0
self.transmission[w_box2] = 0
return self.transmission
class CircularAperture(AnalyticOpticalElement):
""" Defines an ideal circular pupil aperture
Parameters
----------
name : string
Descriptive name
radius : float
Radius of the pupil, in meters. Default is 1.0
gray_pixel : bool
Apply gray pixel approximation to return fractional transmission for
edge pixels that are only partially within this aperture?
pad_factor : float, optional
Amount to oversize the wavefront array relative to this pupil.
This is in practice not very useful, but it provides a straightforward way
of verifying during code testing that the amount of padding (or size of the circle)
does not make any numerical difference in the final result.
"""
@utils.quantity_input(radius=u.meter)
def __init__(self, name=None, radius=1.0 * u.meter, pad_factor=1.0, planetype=PlaneType.unspecified,
gray_pixel=True, **kwargs):
if name is None:
name = "Circle, radius={}".format(radius)
super(CircularAperture, self).__init__(name=name, planetype=planetype, **kwargs)
if radius <= 0*u.meter:
raise ValueError("radius must be a positive nonzero number.")
self.radius = radius
# for creating input wavefronts - let's pad a bit:
self.pupil_diam = pad_factor * 2 * self.radius
self._default_display_size = 3 * self.radius
self._use_gray_pixel = bool(gray_pixel)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the aperture.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("CircularAperture get_transmission must be called with a Wavefront "
"to define the spacing")
assert (wave.planetype != PlaneType.image)
y, x = self.get_coordinates(wave)
radius = self.radius.to(u.meter).value
if self._use_gray_pixel:
pixscale = wave.pixelscale.to(u.meter/u.pixel).value
self.transmission = geometry.filled_circle_aa(wave.shape, 0, 0, radius/pixscale, x/pixscale, y/pixscale)
else:
r = _r(x, y)
del x
del y
w_outside = np.where(r > radius)
del r
self.transmission = np.ones(wave.shape, dtype=_float())
self.transmission[w_outside] = 0
return self.transmission
class HexagonAperture(AnalyticOpticalElement):
""" Defines an ideal hexagonal pupil aperture
Specify either the side length (= corner radius) or the
flat-to-flat distance, or the point-to-point diameter.
Parameters
----------
name : string
Descriptive name
side : float, optional
side length (and/or radius) of hexagon, in meters. Overrides flattoflat if both are present.
flattoflat : float, optional
Distance between sides (flat-to-flat) of the hexagon, in meters. Default is 1.0
diameter : float, optional
point-to-point diameter of hexagon. Twice the side length. Overrides flattoflat, but is overridden by side.
"""
@utils.quantity_input(side=u.meter, diameter=u.meter, flattoflat=u.meter)
def __init__(self, name=None, side=None, diameter=None, flattoflat=None, **kwargs):
if flattoflat is None and side is None and diameter is None:
self.side = 1.0 * u.meter
elif side is not None:
self.side = side
elif diameter is not None:
self.side = diameter / 2
else:
self.side = flattoflat / np.sqrt(3.)
self.pupil_diam = 2 * self.side # for creating input wavefronts
self._default_display_size = 3 * self.side
if name is None:
name = "Hexagon, side length= {}".format(self.side)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
@property
def diameter(self):
return self.side * 2
@property
def flat_to_flat(self):
return self.side * np.sqrt(3.)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("HexagonAperture get_transmission must be called with a Wavefront "
"to define the spacing")
assert (wave.planetype != PlaneType.image)
y, x = self.get_coordinates(wave)
side = self.side.to(u.meter).value
absy = np.abs(y)
self.transmission = np.zeros(wave.shape, dtype=_float())
w_rect = np.where(
(np.abs(x) <= 0.5 * side) &
(absy <= np.sqrt(3) / 2 * side)
)
w_left_tri = np.where(
(x <= -0.5 * side) &
(x >= -1 * side) &
(absy <= (x + 1 * side) * np.sqrt(3))
)
w_right_tri = np.where(
(x >= 0.5 * side) &
(x <= 1 * side) &
(absy <= (1 * side - x) * np.sqrt(3))
)
self.transmission[w_rect] = 1
self.transmission[w_left_tri] = 1
self.transmission[w_right_tri] = 1
return self.transmission
class MultiHexagonAperture(AnalyticOpticalElement):
""" Defines a hexagonally segmented aperture
Parameters
----------
name : string
Descriptive name
rings : integer
The number of rings of hexagons to include, not counting the central segment
(i.e. 2 for a JWST-like aperture, 3 for a Keck-like aperture, and so on)
side : float, optional
side length (and/or radius) of hexagon, in meters. Overrides flattoflat if both are present.
flattoflat : float, optional
Distance between sides (flat-to-flat) of the hexagon, in meters. Default is 1.0
gap: float, optional
Gap between adjacent segments, in meters. Default is 0.01 m = 1 cm
center : bool, optional
should the central segment be included? Default is False.
segmentlist : list of ints, optional
This allows one to specify that only a subset of segments are present, for a
partially populated segmented telescope, non-redundant segment set, etc.
Segments are numbered from 0 for the center segment, 1 for the segment immediately
above it, and then clockwise around each ring.
For example, segmentlist=[1,3,5] would make an aperture of 3 segments.
Note that this routine becomes a bit slow for nrings >4. For repeated computations on
the same aperture, avoid repeated evaluations of this function. It will be faster to create
    this aperture, evaluate it once, and save the result onto a discrete array, via either
(1) saving it to a FITS file using the to_fits() method, and then use that in a
FITSOpticalElement, or
(2) Use the fixed_sampling_optic function to create an ArrayOpticalElement with
a sampled version of this.
"""
@utils.quantity_input(side=u.meter, flattoflat=u.meter, gap=u.meter)
def __init__(self, name="MultiHex", flattoflat=1.0, side=None, gap=0.01, rings=1,
segmentlist=None, center=False, **kwargs):
if flattoflat is None and side is None:
self.side = 1.0 * u.meter
elif side is not None:
self.side = side
else:
self.side = flattoflat / np.sqrt(3.)
self.flattoflat = self.side * np.sqrt(3)
self.rings = rings
self.gap = gap
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
self.pupil_diam = (self.flattoflat + self.gap) * (2 * self.rings + 1)
# make a list of all the segments included in this hex aperture
if segmentlist is not None:
self.segmentlist = segmentlist
else:
self.segmentlist = list(range(self._n_hexes_inside_ring(self.rings + 1)))
if not center:
self.segmentlist.remove(0) # remove center segment 0
def _n_hexes_in_ring(self, n):
""" How many hexagons in ring N? """
return 1 if n == 0 else 6 * n
def _n_hexes_inside_ring(self, n):
""" How many hexagons interior to ring N, not counting N?"""
return sum([self._n_hexes_in_ring(i) for i in range(n)])
def _hex_in_ring(self, hex_index):
""" What ring is a given hexagon in?"""
if hex_index == 0:
return 0
for i in range(100):
if self._n_hexes_inside_ring(i) <= hex_index < self._n_hexes_inside_ring(i + 1):
return i
raise ValueError("Loop exceeded! MultiHexagonAperture is limited to <100 rings of hexagons.")
def _hex_radius(self, hex_index):
""" Radius of a given hexagon from the center """
ring = self._hex_in_ring(hex_index)
if ring <= 1:
return (self.flattoflat + self.gap) * ring
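        # Note: only rings 0 and 1 are handled above; for segments in outer rings this
        # implicitly returns None, since those segments do not all lie at a single radius.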
def _hex_center(self, hex_index):
""" Center coordinates of a given hexagon
counting clockwise around each ring
Returns y, x coords
"""
ring = self._hex_in_ring(hex_index)
# handle degenerate case of center segment
# to avoid div by 0 in the main code below
if ring == 0:
return 0, 0
# now count around from the starting point:
index_in_ring = hex_index - self._n_hexes_inside_ring(ring) + 1 # 1-based
angle_per_hex = 2 * np.pi / self._n_hexes_in_ring(ring) # angle in radians
# Now figure out what the radius is:
flattoflat = self.flattoflat.to(u.meter).value
gap = self.gap.to(u.meter).value
side = self.side.to(u.meter).value
radius = (flattoflat + gap) * ring # JWST 'B' segments, aka corners
if np.mod(index_in_ring, ring) == 1:
angle = angle_per_hex * (index_in_ring - 1)
ypos = radius * np.cos(angle)
xpos = radius * np.sin(angle)
else:
# find position of previous 'B' type segment.
last_B_angle = ((index_in_ring - 1) // ring) * ring * angle_per_hex
ypos0 = radius * np.cos(last_B_angle)
xpos0 = radius * np.sin(last_B_angle)
# count around from that corner
da = (flattoflat + gap) * np.cos(30 * np.pi / 180)
db = (flattoflat + gap) * np.sin(30 * np.pi / 180)
whichside = (index_in_ring - 1) // ring # which of the sides are we on?
if whichside == 0:
dx, dy = da, -db
elif whichside == 1:
dx, dy = 0, -(flattoflat + gap)
elif whichside == 2:
dx, dy = -da, -db
elif whichside == 3:
dx, dy = -da, db
elif whichside == 4:
dx, dy = 0, (flattoflat + gap)
elif whichside == 5:
dx, dy = da, db
xpos = xpos0 + dx * np.mod(index_in_ring - 1, ring)
ypos = ypos0 + dy * np.mod(index_in_ring - 1, ring)
return ypos, xpos
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront):
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype != PlaneType.image)
self.transmission = np.zeros(wave.shape, dtype=_float())
for i in self.segmentlist:
self._one_hexagon(wave, i)
return self.transmission
def _one_hexagon(self, wave, index, value=1):
""" Draw one hexagon into the self.transmission array """
y, x = self.get_coordinates(wave)
side = self.side.to(u.meter).value
ceny, cenx = self._hex_center(index)
y -= ceny
x -= cenx
absy = np.abs(y)
w_rect = np.where(
(np.abs(x) <= 0.5 * side) &
(absy <= np.sqrt(3) / 2 * side)
)
w_left_tri = np.where(
(x <= -0.5 * side) &
(x >= -1 * side) &
(absy <= (x + 1 * side) * np.sqrt(3))
)
w_right_tri = np.where(
(x >= 0.5 * side) &
(x <= 1 * side) &
(absy <= (1 * side - x) * np.sqrt(3))
)
self.transmission[w_rect] = value
self.transmission[w_left_tri] = value
self.transmission[w_right_tri] = value
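# A minimal usage sketch for MultiHexagonAperture (the numeric values are illustrative
# and not tied to any particular telescope):
def _example_multi_hexagon_aperture():
    # Two rings of segments around an omitted center segment (a JWST-like layout):
    seg_ap = MultiHexagonAperture(rings=2, flattoflat=1.3 * u.meter, gap=0.01 * u.meter)
    # A sparse, partially populated aperture using only three ring-1 segments:
    sparse = MultiHexagonAperture(rings=1, segmentlist=[1, 3, 5])
    return seg_ap, sparse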
class NgonAperture(AnalyticOpticalElement):
""" Defines an ideal N-gon pupil aperture.
Parameters
-----------
name : string
Descriptive name
nsides : integer
Number of sides. Default is 6.
radius : float
radius to the vertices, meters. Default is 1.
rotation : float
Rotation angle to first vertex, in degrees counterclockwise from the +X axis. Default is 0.
"""
@utils.quantity_input(radius=u.meter)
def __init__(self, name=None, nsides=6, radius=1 * u.meter, rotation=0., **kwargs):
self.radius = radius
self.nsides = nsides
self.pupil_diam = 2 * self.radius # for creating input wavefronts
if name is None:
name = "{}-gon, radius= {}".format(self.nsides, self.radius)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, rotation=rotation, **kwargs)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype != PlaneType.image)
y, x = self.get_coordinates(wave)
phase = self.rotation * np.pi / 180
vertices = np.zeros((self.nsides, 2), dtype=_float())
for i in range(self.nsides):
vertices[i] = [np.cos(i * 2 * np.pi / self.nsides + phase),
np.sin(i * 2 * np.pi / self.nsides + phase)]
vertices *= self.radius.to(u.meter).value
self.transmission = np.zeros(wave.shape, dtype=_float())
for row in range(wave.shape[0]):
pts = np.asarray(list(zip(x[row], y[row])))
ok = matplotlib.path.Path(vertices).contains_points(pts)
self.transmission[row][ok] = 1.0
return self.transmission
class RectangleAperture(AnalyticOpticalElement):
""" Defines an ideal rectangular pupil aperture
Parameters
----------
name : string
Descriptive name
width : float
width of the rectangle, in meters. Default is 0.5
height : float
height of the rectangle, in meters. Default is 1.0
rotation : float
Rotation angle for 'width' axis. Default is 0.
"""
@utils.quantity_input(width=u.meter, height=u.meter)
def __init__(self, name=None, width=0.5 * u.meter, height=1.0 * u.meter, rotation=0.0, **kwargs):
self.width = width
self.height = height
if name is None:
name = "Rectangle, size= {s.width:.1f} wide * {s.height:.1f} high".format(s=self)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, rotation=rotation, **kwargs)
# for creating input wavefronts:
self.pupil_diam = np.sqrt(self.height ** 2 + self.width ** 2)
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the occulter.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype != PlaneType.image)
y, x = self.get_coordinates(wave)
w_outside = np.where(
(abs(y) > (self.height.to(u.meter).value / 2)) |
(abs(x) > (self.width.to(u.meter).value / 2))
)
del y
del x
self.transmission = np.ones(wave.shape, dtype=_float())
self.transmission[w_outside] = 0
return self.transmission
class SquareAperture(RectangleAperture):
""" Defines an ideal square pupil aperture
Parameters
----------
name : string
Descriptive name
size: float
side length of the square, in meters. Default is 1.0
rotation : float
Rotation angle for the square. Default is 0.
"""
@utils.quantity_input(size=u.meter)
def __init__(self, name=None, size=1.0 * u.meter, **kwargs):
self._size = size
if name is None:
name = "Square, side length= {}".format(size)
RectangleAperture.__init__(self, name=name, width=size, height=size, **kwargs)
self.size = size
self.pupil_diam = 2 * self.size # for creating input wavefronts
@property
def size(self):
return self._size
@size.setter
def size(self, value):
self._size = value
self.height = value
self.width = value
class SecondaryObscuration(AnalyticOpticalElement):
""" Defines the central obscuration of an on-axis telescope including secondary mirror and
supports
The number of supports is adjustable but they are always radially symmetric around the center.
See AsymmetricSecondaryObscuration if you need more flexibility.
Parameters
----------
secondary_radius : float or astropy Quantity length
Radius of the circular secondary obscuration, in meters or other unit.
Default 0.5 m
n_supports : int
Number of secondary mirror supports ("spiders"). These will be
spaced equally around a circle. Default is 4.
support_width : float or astropy Quantity length
Width of each support, in meters or other unit. Default is 0.01 m = 1 cm.
support_angle_offset : float
Angular offset, in degrees, of the first secondary support from the X axis.
"""
@utils.quantity_input(secondary_radius=u.meter, support_width=u.meter)
def __init__(self, name=None, secondary_radius=0.5 * u.meter, n_supports=4, support_width=0.01 * u.meter,
support_angle_offset=0.0, **kwargs):
if name is None:
name = "Secondary Obscuration with {0} supports".format(n_supports)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
self.secondary_radius = secondary_radius
self.n_supports = n_supports
self.support_width = support_width
self.support_angle_offset = support_angle_offset
        # for creating input wavefronts if this is the first optic in an optical system:
self.pupil_diam = 4 * self.secondary_radius
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the obscuration
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype != PlaneType.image)
self.transmission = np.ones(wave.shape, dtype=_float())
y, x = self.get_coordinates(wave)
r = np.sqrt(x ** 2 + y ** 2) # * wave.pixelscale
self.transmission[r < self.secondary_radius.to(u.meter).value] = 0
for i in range(self.n_supports):
angle = 2 * np.pi / self.n_supports * i + np.deg2rad(self.support_angle_offset)
# calculate rotated x' and y' coordinates after rotation by that angle.
xp = np.cos(angle) * x + np.sin(angle) * y
yp = -np.sin(angle) * x + np.cos(angle) * y
self.transmission[(xp > 0) & (np.abs(yp) < self.support_width.to(u.meter).value / 2)] = 0
# TODO check here for if there are no pixels marked because the spider is too thin.
# In that case use a grey scale approximation
return self.transmission
class AsymmetricSecondaryObscuration(SecondaryObscuration):
""" Defines a central obscuration with one or more supports which can be oriented at
arbitrary angles around the primary mirror, a la the three supports of JWST
This also allows for secondary supports that do not intersect with
the primary mirror center; use the support_offset_x and support_offset_y parameters
to apply offsets relative to the center for the origin of each strut.
Parameters
----------
secondary_radius : float
Radius of the circular secondary obscuration. Default 0.5 m
support_angle : ndarray or list of floats
The angle measured counterclockwise from +Y for each support
support_width : float or astropy Quantity of type length, or list of those
if scalar, gives the width for all support struts
if a list, gives separately the width for each support strut independently.
Widths in meters or other unit if specified. Default is 0.01 m = 1 cm.
support_offset_x : float, or list of floats.
Offset in the X direction of the start point for each support.
if scalar, applies to all supports; if a list, gives a separate offset for each.
support_offset_y : float, or list of floats.
Offset in the Y direction of the start point for each support.
if scalar, applies to all supports; if a list, gives a separate offset for each.
"""
@utils.quantity_input(support_width=u.meter)
def __init__(self, support_angle=(0, 90, 240), support_width=0.01 * u.meter,
support_offset_x=0.0, support_offset_y=0.0, **kwargs):
SecondaryObscuration.__init__(self, n_supports=len(support_angle), **kwargs)
self.support_angle = np.asarray(support_angle)
if np.isscalar(support_width.value):
support_width = np.zeros(len(support_angle)) + support_width
self.support_width = support_width
if np.isscalar(support_offset_x):
support_offset_x = np.zeros(len(support_angle)) + support_offset_x
self.support_offset_x = support_offset_x
if np.isscalar(support_offset_y):
support_offset_y = np.zeros(len(support_angle)) + support_offset_y
self.support_offset_y = support_offset_y
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the obscuration
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype != PlaneType.image)
self.transmission = np.ones(wave.shape, dtype=_float())
y, x = self.get_coordinates(wave)
r = np.sqrt(x ** 2 + y ** 2)
self.transmission[r < self.secondary_radius.to(u.meter).value] = 0
for angle_deg, width, offset_x, offset_y in zip(self.support_angle,
self.support_width,
self.support_offset_x,
self.support_offset_y):
angle = np.deg2rad(angle_deg + 90) # 90 deg offset is to start from the +Y direction
# calculate rotated x' and y' coordinates after rotation by that angle.
# and application of offset
xp = np.cos(angle) * (x - offset_x) + np.sin(angle) * (y - offset_y)
yp = -np.sin(angle) * (x - offset_x) + np.cos(angle) * (y - offset_y)
self.transmission[(xp > 0) & (np.abs(yp) < width.to(u.meter).value / 2)] = 0
# TODO check here for if there are no pixels marked because the spider is too thin.
# In that case use a grey scale approximation
return self.transmission
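# A minimal usage sketch for AsymmetricSecondaryObscuration: three struts at uneven
# angles, with the second strut both wider and offset from the pupil center
# (all numbers are illustrative):
def _example_asymmetric_obscuration():
    return AsymmetricSecondaryObscuration(secondary_radius=0.3 * u.meter,
                                          support_angle=[0, 100, 240],
                                          support_width=[0.01, 0.02, 0.01] * u.meter,
                                          support_offset_y=[0.0, 0.3, 0.0])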
class ThinLens(CircularAperture):
""" An idealized thin lens, implemented as a Zernike defocus term.
The sign convention adopted is the usual for lenses: a "positive" lens
is converging (i.e. convex), a "negative" lens is diverging (i.e. concave).
In other words, a positive number of waves of defocus indicates a
lens with positive OPD at the center, and negative at its rim.
(Note, this is opposite the sign convention for Zernike defocus)
Parameters
-------------
nwaves : float
The number of waves of defocus, peak to valley. May be positive or negative.
This is applied as a normalization over an area defined by the circumscribing circle
of the input wavefront. That is, there will be nwaves defocus peak-to-valley
over the region of the pupil that has nonzero input intensity.
reference_wavelength : float
Wavelength, in meters, at which that number of waves of defocus is specified.
radius : float
Pupil radius, in meters, over which the Zernike defocus term should be computed
such that rho = 1 at r = `radius`.
"""
@utils.quantity_input(reference_wavelength=u.meter)
def __init__(self, name='Thin lens', nwaves=4.0, reference_wavelength=1e-6 * u.meter,
radius=1.0*u.meter, **kwargs):
self.reference_wavelength = reference_wavelength
self.nwaves = nwaves
self.max_phase_delay = reference_wavelength * nwaves
CircularAperture.__init__(self, name=name, radius=radius, **kwargs)
self.wavefront_display_hint = 'phase' # preferred display for wavefronts at this plane
def get_opd(self, wave):
y, x = self.get_coordinates(wave)
r = np.sqrt(x ** 2 + y ** 2)
r_norm = r / self.radius.to(u.meter).value
# don't forget the factor of 0.5 to make the scaling factor apply as peak-to-valley
# rather than center-to-peak
defocus_zernike = ((2 * r_norm ** 2 - 1) *
(0.5 * self.nwaves * self.reference_wavelength.to(u.meter).value))
# add negative sign here to get desired sign convention
opd = -defocus_zernike
# the thin lens is explicitly also a circular aperture:
        # we use the aperture intensity here to mask the OPD we return, in
# order to avoid bogus values outside the aperture
aperture_intensity = CircularAperture.get_transmission(self, wave)
opd[aperture_intensity==0] = 0
return opd
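# A minimal usage sketch for ThinLens: a converging lens adding 4 waves of defocus
# (peak-to-valley) at 1 micron over a 1 m radius pupil; a diverging (concave) lens
# would use a negative nwaves (values are illustrative):
def _example_thin_lens():
    return ThinLens(nwaves=4.0, reference_wavelength=1e-6 * u.meter, radius=1.0 * u.meter)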
class GaussianAperture(AnalyticOpticalElement):
""" Defines an ideal Gaussian apodized pupil aperture,
or at least as much of one as can be fit into a finite-sized
array
The Gaussian's width must be set with either the fwhm or w parameters.
Note that this makes an optic whose electric *field amplitude*
transmission is the specified Gaussian; thus the intensity
transmission will be the square of that Gaussian.
Parameters
----------
name : string
Descriptive name
fwhm : float, optional.
Full width at half maximum for the Gaussian, in meters.
w : float, optional
Beam width parameter, equal to fwhm/(2*sqrt(ln(2))).
pupil_diam : float, optional
default pupil diameter for cases when it is not otherwise
specified (e.g. displaying the optic by itself.) Default
value is 3x the FWHM.
"""
@utils.quantity_input(fwhm=u.meter, w=u.meter, pupil_diam=u.meter)
def __init__(self, name=None, fwhm=None, w=None, pupil_diam=None, **kwargs):
if fwhm is None and w is None:
raise ValueError("Either the fwhm or w parameter must be set.")
elif w is not None:
self.w = w
elif fwhm is not None:
self.w = fwhm / (2 * np.sqrt(np.log(2)))
if pupil_diam is None:
pupil_diam = 3 * self.fwhm # for creating input wavefronts
self.pupil_diam = pupil_diam
if name is None:
name = "Gaussian aperture with fwhm ={0:.2f}".format(self.fwhm)
AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.pupil, **kwargs)
@property
def fwhm(self):
return self.w * (2 * np.sqrt(np.log(2)))
def get_transmission(self, wave):
""" Compute the transmission inside/outside of the aperture.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
y, x = self.get_coordinates(wave)
r = np.sqrt(x ** 2 + y ** 2)
transmission = np.exp((- (r / self.w.to(u.meter).value) ** 2))
return transmission
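# A minimal sketch of the two equivalent ways to set the Gaussian width: via the FWHM
# or via the beam width parameter w = fwhm / (2*sqrt(ln 2)) (values are illustrative):
def _example_gaussian_aperture():
    ap_fwhm = GaussianAperture(fwhm=1.0 * u.meter)
    ap_w = GaussianAperture(w=1.0 / (2 * np.sqrt(np.log(2))) * u.meter)
    return ap_fwhm.fwhm, ap_w.fwhm  # both evaluate to 1.0 m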
# ------ generic analytic optics ------
class KnifeEdge(AnalyticOpticalElement):
""" A half-infinite opaque plane, with a perfectly sharp edge
through the origin.
Use the 'rotation', 'shift_x', and 'shift_y' parameters to adjust
location and orientation.
Rotation=0 yields a knife edge oriented vertically (edge parallel to +y)
with the opaque side to the right.
"""
def __init__(self, name=None, rotation=0, **kwargs):
if name is None:
name = "Knife edge at {} deg".format(rotation)
AnalyticOpticalElement.__init__(self, name=name, rotation=rotation, **kwargs)
def get_transmission(self, wave):
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
y, x = self.get_coordinates(wave)
return x < 0
class CompoundAnalyticOptic(AnalyticOpticalElement):
""" Define a compound analytic optical element made up of the combination
of two or more individual optical elements.
This is just a convenience routine for semantic organization of optics.
It can be useful to keep the list of optical planes cleaner, but
you can certainly just add a whole bunch of planes all in a row without
using this class to group them.
All optics should be of the same plane type (pupil or image); propagation between
different optics contained inside one compound is not supported.
Parameters
----------
opticslist : list
A list of AnalyticOpticalElements to be merged together.
mergemode : string, default = 'and'
Method for merging transmissions:
'and' : resulting transmission is product of constituents. (E.g
trans = trans1*trans2)
'or' : resulting transmission is sum of constituents, with overlap
subtracted. (E.g. trans = trans1 + trans2 - trans1*trans2)
In both methods, the resulting OPD is the sum of the constituents' OPDs.
"""
def _validate_only_analytic_optics(self, optics_list):
for optic in optics_list:
if isinstance(optic, AnalyticOpticalElement):
continue # analytic elements are allowed
elif isinstance(optic, InverseTransmission):
if isinstance(optic.uninverted_optic, AnalyticOpticalElement):
continue # inverted elements are allowed, as long as they're analytic elements
else:
return False # inverted non-analytic elements aren't allowed, skip the rest
else:
return False # no other types allowed, skip the rest of the list
return True
def __init__(self, opticslist=None, name="unnamed", mergemode="and", verbose=True, **kwargs):
if opticslist is None:
raise ValueError("Missing required opticslist argument to CompoundAnalyticOptic")
AnalyticOpticalElement.__init__(self, name=name, verbose=verbose, **kwargs)
self.opticslist = []
self.planetype = None
# check for valid mergemode
if mergemode == "and":
self.mergemode = "and"
elif mergemode == "or":
self.mergemode = "or"
else:
raise ValueError("mergemode must be either 'and' or 'or'.")
for optic in opticslist:
if not self._validate_only_analytic_optics(opticslist):
raise ValueError("Supplied optics list to CompoundAnalyticOptic can "
"only contain AnalyticOptics")
else:
# if we are adding the first optic in the list, check what type of optical plane
# it has
# for subsequent optics, validate they have the same type
if len(self.opticslist) == 0:
self.planetype = optic.planetype
elif (self.planetype != optic.planetype and self.planetype != PlaneType.unspecified and
optic.planetype != PlaneType.unspecified):
raise ValueError("Cannot mix image plane and pupil plane optics in "
"the same CompoundAnalyticOptic")
self.opticslist.append(optic)
if hasattr(optic, '_default_display_size'):
if hasattr(self, '_default_display_size'):
self._default_display_size = max(self._default_display_size,
optic._default_display_size)
else:
self._default_display_size = optic._default_display_size
if hasattr(optic, 'pupil_diam'):
if not hasattr(self, 'pupil_diam'):
self.pupil_diam = optic.pupil_diam
else:
self.pupil_diam = max(self.pupil_diam, optic.pupil_diam)
if self.planetype == PlaneType.pupil:
if all([hasattr(o, 'pupil_diam') for o in self.opticslist]):
self.pupil_diam = np.asarray([o.pupil_diam.to(u.meter).value for o in self.opticslist]).max() * u.meter
def get_transmission(self, wave):
if self.mergemode == "and":
trans = np.ones(wave.shape, dtype=_float())
for optic in self.opticslist:
trans *= optic.get_transmission(wave)
elif self.mergemode == "or":
trans = np.zeros(wave.shape, dtype=_float())
for optic in self.opticslist:
trans = trans + optic.get_transmission(wave) - trans * optic.get_transmission(wave)
else:
raise ValueError("mergemode must be either 'and' or 'or'.")
self.transmission = trans
return self.transmission
def get_opd(self, wave):
opd = np.zeros(wave.shape, dtype=_float())
for optic in self.opticslist:
opd += optic.get_opd(wave)
self.opd = opd
return self.opd
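# A minimal usage sketch for CompoundAnalyticOptic: an annular pupil built from a
# circular aperture multiplied (mergemode='and') by a central obscuration with four
# spiders (values are illustrative):
def _example_compound_pupil():
    return CompoundAnalyticOptic(opticslist=[
        CircularAperture(radius=1.0 * u.meter),
        SecondaryObscuration(secondary_radius=0.2 * u.meter,
                             n_supports=4, support_width=0.01 * u.meter)],
        name='Example annular pupil', mergemode='and')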
# ------ convert analytic optics to array optics ------
def fixed_sampling_optic(optic, wavefront, oversample=2):
"""Convert a variable-sampling AnalyticOpticalElement to a fixed-sampling ArrayOpticalElement
For a given input optic this produces an equivalent output optic stored in simple arrays rather
than created each time via function calls.
If you know a priori the desired sampling will remain constant for some
application, and don't need any of the other functionality of the
AnalyticOpticalElement machinery with get_opd and get_transmission functions,
you can save time by setting the sampling to a fixed value and saving arrays
computed on that sampling.
Also, you can use this to evaluate any optic on a finer sampling scale and then bin the
results to the desired scale, using the so-called gray-pixel approximation. (i.e. the
value for each output pixel is computed as the average of N*N finer pixels in an
intermediate array.)
Parameters
----------
optic : poppy.AnalyticOpticalElement
Some optical element
    wavefront : poppy.Wavefront
A wavefront to define the desired sampling pixel size and number.
oversample : int
Subpixel sampling factor for "gray pixel" approximation: the optic will be
evaluated on a finer pixel scale and then binned down to the desired sampling.
Returns
-------
new_array_optic : poppy.ArrayOpticalElement
        A version of the input optic with fixed arrays for OPD and transmission.
"""
from .poppy_core import ArrayOpticalElement
npix = wavefront.shape[0]
grid_size = npix*u.pixel*wavefront.pixelscale
_log.debug("Converting {} to fixed sampling with grid_size={}, npix={}, oversample={}".format(
optic.name, grid_size, npix, oversample))
if oversample>1:
_log.debug("retrieving oversampled opd and transmission arrays")
sampled_opd = optic.sample(what='opd', npix=npix*oversample, grid_size=grid_size)
sampled_trans = optic.sample(what='amplitude', npix=npix*oversample, grid_size=grid_size)
_log.debug("binning down opd and transmission arrays")
sampled_opd = utils.krebin(sampled_opd, wavefront.shape)/oversample**2
sampled_trans = utils.krebin(sampled_trans, wavefront.shape)/oversample**2
else:
sampled_opd = optic.sample(what='opd', npix=npix, grid_size=grid_size)
sampled_trans = optic.sample(what='amplitude', npix=npix, grid_size=grid_size)
return ArrayOpticalElement(opd=sampled_opd,
transmission=sampled_trans,
pixelscale=wavefront.pixelscale,
name=optic.name)
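# A minimal usage sketch for fixed_sampling_optic, assuming the Wavefront class in
# poppy_core accepts npix and diam arguments as shown (values are illustrative):
def _example_fixed_sampling():
    from .poppy_core import Wavefront
    optic = MultiHexagonAperture(rings=2)
    wave = Wavefront(npix=512, diam=3.0)
    # Freeze the analytic optic onto wave's 512-pixel grid with 2x gray-pixel averaging:
    return fixed_sampling_optic(optic, wave, oversample=2)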
| 41.413704
| 133
| 0.621579
|
5d7f616e8bdd5cf40c4c2fe0faee4fec24e52050
| 587
|
py
|
Python
|
rest_api/config.py
|
spilioeve/WM-src
|
ba9265360bdac848b119ec11456b8f65bf2b1af8
|
[
"MIT"
] | 2
|
2020-04-28T21:37:44.000Z
|
2020-05-05T14:28:11.000Z
|
rest_api/config.py
|
spilioeve/WM-src
|
ba9265360bdac848b119ec11456b8f65bf2b1af8
|
[
"MIT"
] | 1
|
2021-05-28T17:52:39.000Z
|
2021-05-28T20:56:13.000Z
|
rest_api/config.py
|
spilioeve/WM-src
|
ba9265360bdac848b119ec11456b8f65bf2b1af8
|
[
"MIT"
] | 3
|
2021-04-23T15:01:06.000Z
|
2021-07-30T22:41:50.000Z
|
# Configuration file for `REST_API.py`
# Set to True for enhanced logging
DEBUG = False
# Set to true to use Redis (per specified configuration)
REDIS = False
# Set the default number of seconds results should be
# stored by Redis (604800 is equivalent to 7 days)
REDIS_TTL = 604800
# Specify Redis Instance (ignored if app.config['REDIS'] == False)
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
# Set to local unzipped CoreNLP Path
CORENLP = 'PATH_TO_CORENLP'
# Set basic auth username and password
BASIC_AUTH_USERNAME = 'TESTUSER'
BASIC_AUTH_PASSWORD = 'TESTPASSWORD'
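# A minimal sketch of how these settings are typically consumed by a Flask app such as
# `REST_API.py` (the exact loading call used there is an assumption, not shown here):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('config')   # pulls in DEBUG, REDIS, REDIS_TTL, ...
#     if app.config['REDIS']:
#         import redis
#         cache = redis.StrictRedis(host=app.config['REDIS_HOST'],
#                                   port=app.config['REDIS_PORT'],
#                                   db=app.config['REDIS_DB'])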
| 25.521739
| 66
| 0.761499
|
d44b1892942b7494360f657d80cfc87481fc1ff8
| 1,192
|
py
|
Python
|
setup.py
|
ltirrell/pyCompare
|
a856ebc540f7ed0a15fcb6a3a00823b6501a37fa
|
[
"MIT"
] | null | null | null |
setup.py
|
ltirrell/pyCompare
|
a856ebc540f7ed0a15fcb6a3a00823b6501a37fa
|
[
"MIT"
] | null | null | null |
setup.py
|
ltirrell/pyCompare
|
a856ebc540f7ed0a15fcb6a3a00823b6501a37fa
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import os
basepath = os.path.realpath(__file__)
basepath = os.path.dirname(basepath)
path = os.path.join(basepath, 'pyCompare', 'VERSION')
with open(path, 'r') as file:
VERSION = file.readline().strip()
path = os.path.join(basepath, 'README.md')
with open(path, 'r') as file:
README = file.read()
setup(name='pyCompare',
version=VERSION,
description='Bland-Altman plots for Python',
url='https://github.com/jaketmp/pyCompare',
author='Jake TM Pearce',
license='MIT',
packages=find_packages(),
install_requires=[
'numpy>=1.14.2',
'scipy>=1.0.1',
'matplotlib>=3.0.2',
],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Visualization",
],
long_description_content_type='text/markdown',
long_description = README,
documentation='https://github.com/jaketmp/pyCompare',
include_package_data=True,
zip_safe=True
)
| 27.090909
| 55
| 0.707215
|
3652f57bb66cddc91781a003b8ae2fcc3e3e37d3
| 550
|
py
|
Python
|
cdl_rest_api/migrations/0005_experimentresult_experimentdata.py
|
zilkf92/cdl-django-webservice
|
18536859a13c16e85f05bad14912f8d1939a78ea
|
[
"MIT"
] | 1
|
2022-01-25T13:52:03.000Z
|
2022-01-25T13:52:03.000Z
|
cdl_rest_api/migrations/0005_experimentresult_experimentdata.py
|
zilkf92/cdl-django-webservice
|
18536859a13c16e85f05bad14912f8d1939a78ea
|
[
"MIT"
] | null | null | null |
cdl_rest_api/migrations/0005_experimentresult_experimentdata.py
|
zilkf92/cdl-django-webservice
|
18536859a13c16e85f05bad14912f8d1939a78ea
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.13 on 2021-12-21 13:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cdl_rest_api', '0004_coincidences_countrates_experimentdata'),
]
operations = [
migrations.AddField(
model_name='experimentresult',
name='experimentData',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cdl_rest_api.experimentdata'),
),
]
| 27.5
| 139
| 0.68
|
2f449dc8f69e96d38c527f2a93abe97a4d4c469f
| 3,923
|
py
|
Python
|
python/tvm/hybrid/util.py
|
Orion34C/incubator-tvm
|
27a02844cb52e883a4a66da68a527590d76f7d01
|
[
"Apache-2.0"
] | 1
|
2021-03-20T02:03:00.000Z
|
2021-03-20T02:03:00.000Z
|
python/tvm/hybrid/util.py
|
Orion34C/incubator-tvm
|
27a02844cb52e883a4a66da68a527590d76f7d01
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/hybrid/util.py
|
Orion34C/incubator-tvm
|
27a02844cb52e883a4a66da68a527590d76f7d01
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal utilities for parsing Python subset to HalideIR"""
import ast
import inspect
import logging
import sys
import numpy
from tvm._ffi.base import numeric_types
from tvm.ir.container import Array
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from .. import api as _api
from ..tensor import Tensor
#pylint: disable=invalid-name
np_arg_types = tuple(list(numeric_types) + [numpy.ndarray])
tvm_arg_types = (Tensor, Array, _expr.Var, _expr.ConstExpr)
halide_imm_types = (_expr.IntImm, _expr.FloatImm)
def _internal_assert(cond, err):
"""Simplify the code segment like if not XXX then raise an error"""
if not cond:
raise ValueError(err)
# Useful constants. To avoid runtime dependencies, we use function calls to return them.
def make_nop():
"""Returns a 'no operation' node in HalideIR."""
return _stmt.Evaluate(_api.const(0, dtype='int32'))
def is_docstring(node):
"""Checks if a Python AST node is a docstring"""
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
def _pruned_source(func):
"""Prune source code's extra leading spaces"""
try:
lines = inspect.getsource(func).split('\n')
leading_space = len(lines[0]) - len(lines[0].lstrip(' '))
lines = [line[leading_space:] for line in lines]
return '\n'.join(lines)
except IOError as err:
if sys.version_info[0] == 2 and str(err) == 'could not get source code':
logging.log(logging.CRITICAL, \
                        'This module is not fully supported under Python 2... ' \
                        'Please move to Python 3!')
raise err
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
# pylint: disable=import-outside-toplevel
from .. import ir_pass
def replace(op):
if isinstance(op, _stmt.Provide) and op.func in rmap.keys():
buf = rmap[op.func]
return _stmt.Provide(buf.op, op.value_index, op.value, op.args)
if isinstance(op, _expr.Call) and op.func in rmap.keys():
buf = rmap[op.func]
return _expr.Call(buf.dtype, buf.name, op.args, \
_expr.Call.Halide, buf.op, buf.value_index)
return None
return ir_pass.IRTransform(body, None, replace, ['Provide', 'Call'])
def _is_tvm_arg_types(args):
"""Determine a list of element is either a list of tvm arguments of a list of numpy arguments.
If neither is true, raise a value error."""
if isinstance(args[0], tvm_arg_types):
for elem in args[1:]:
_internal_assert(isinstance(elem, tvm_arg_types),
"Expecting a Var, Tensor or ConstExpr instance but %s get!" \
% str(type(elem)))
return True
_internal_assert(isinstance(args[0], np_arg_types), \
"Expect a numpy type but %s get!" % str(type(args[0])))
for elem in args[1:]:
_internal_assert(isinstance(elem, np_arg_types), \
"Expect a numpy type but %s get!" % str(type(elem)))
return False
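# A minimal sketch of what _pruned_source does: it dedents the retrieved source so that
# functions defined inside a class (or another function) can be re-parsed. The dummy
# function below is purely illustrative:
def _example_pruned_source():
    def dummy(a):
        return a + 1
    src = _pruned_source(dummy)
    return ast.parse(src)  # parses cleanly even though `dummy` was defined indented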
| 36.663551
| 98
| 0.664797
|
93f585410c26310319af9f492d5e168e08a36358
| 2,835
|
py
|
Python
|
google/appengine/tools/devappserver2/python/pdb_sandbox.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
google/appengine/tools/devappserver2/python/pdb_sandbox.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 53
|
2016-04-06T21:10:43.000Z
|
2018-03-19T23:14:33.000Z
|
google/appengine/tools/devappserver2/python/pdb_sandbox.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 23
|
2016-04-19T05:45:26.000Z
|
2021-12-31T23:22:36.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Modify pdb to work with the devappserver2 sandbox."""
import sys
import threading
def install(config):
"""Install the necessary changes to pdb.
Monkeypatch pdb so that it can be used in the devappserver sandbox. Must
be called after the sandbox has been installed but before stdin/stdout
objects have been reassigned.
Args:
config: The runtime_config_pb2.Config to use to configure the sandbox.
"""
# Import here (i.e. after sandbox installed) to get the post sandbox pdb.
# Extremely important so that we monkeypatch the same pdb the apps can
# import.
import pdb as pdb_postsandbox
# Save stdin/stdout as the references will not be available when user
# code runs.
real_stdin = sys.stdin
real_stdout = sys.stdout
# Capture the original Pdb so we can forward the __init__ call after
# monkeypatching (if not captured, forwarding the call results in infinite
# recursion).
pdb_premonkeypatch = pdb_postsandbox.Pdb
if config.threadsafe or config.max_instances != 1:
warning = """
********************************************************************************
* WARNING: please read before using PDB:
* https://developers.google.com/appengine/docs/python/tools/devserver#Python_Debugging_with_PDB
********************************************************************************
"""
lock = threading.Lock()
else:
warning = ''
class _Pdb(pdb_postsandbox.Pdb):
_warning_written = False
# TODO: improve argument handling so if new arguments are added
# in the future or the defaults change, this does not need to be updated.
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
if stdin is None:
stdin = real_stdin
if stdout is None:
stdout = real_stdout
# Pdb is old style class so no super().
pdb_premonkeypatch.__init__(self, completekey, stdin, stdout, skip)
if warning:
with lock:
# Note: while the goal is to write the warning only one time, it
# may be written multiple times (once each per instance).
if not _Pdb._warning_written:
stdout.write(warning)
_Pdb._warning_written = True
pdb_postsandbox.Pdb = _Pdb
| 35
| 95
| 0.676543
|
aa01fdda715fcfcd4c5da83e593b9f720404ea28
| 734
|
py
|
Python
|
mongodb/factory/results/_base.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 5
|
2020-08-26T20:12:00.000Z
|
2020-12-11T16:39:22.000Z
|
mongodb/factory/results/_base.py
|
RaenonX/Jelly-Bot
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 234
|
2019-12-14T03:45:19.000Z
|
2020-08-26T18:55:19.000Z
|
mongodb/factory/results/_base.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 2
|
2019-10-23T15:21:15.000Z
|
2020-05-22T09:35:55.000Z
|
from abc import ABC
from dataclasses import dataclass
from typing import Optional
from JellyBot.api.static import result
from models import Model
from ._outcome import BaseOutcome
@dataclass
class BaseResult(ABC):
outcome: BaseOutcome
exception: Optional[Exception]
def serialize(self) -> dict:
return {result.Results.EXCEPTION: repr(self.exception),
result.Results.OUTCOME: self.outcome.code}
@property
def success(self) -> bool:
return self.outcome.is_success
@dataclass
class ModelResult(BaseResult, ABC):
model: Optional[Model]
def serialize(self) -> dict:
d = super().serialize()
d.update(**{result.Results.MODEL: self.model})
return d
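# A minimal sketch of how a concrete result type is typically declared on top of these
# bases (the class name and field values below are illustrative, not from this module):
#
#     @dataclass
#     class GetModelResult(ModelResult):
#         pass
#
#     res = GetModelResult(outcome=some_outcome, exception=None, model=some_model)
#     res.success        # -> some_outcome.is_success
#     res.serialize()    # -> {EXCEPTION: 'None', OUTCOME: some_outcome.code, MODEL: some_model}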
| 22.242424
| 63
| 0.69346
|
32d32a550a0b7898741dcdbb634c7d5e6ee484fb
| 38,876
|
py
|
Python
|
models/swin_transformer.py
|
rahulmangalampalli/esvit
|
5caf6e36b088ae2e7aaa4100b307eec991078e3e
|
[
"MIT"
] | 237
|
2021-07-14T21:06:56.000Z
|
2022-03-31T02:38:56.000Z
|
models/swin_transformer.py
|
rahulmangalampalli/esvit
|
5caf6e36b088ae2e7aaa4100b307eec991078e3e
|
[
"MIT"
] | 18
|
2021-08-05T09:00:03.000Z
|
2022-03-22T03:11:53.000Z
|
models/swin_transformer.py
|
rahulmangalampalli/esvit
|
5caf6e36b088ae2e7aaa4100b307eec991078e3e
|
[
"MIT"
] | 25
|
2021-07-14T20:32:28.000Z
|
2022-03-31T19:34:53.000Z
|
# --------------------------------------------------------
# Modified by Chunyuan Li (chunyl@microsoft.com)
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Written by Ze Liu
# --------------------------------------------------------
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
import torch.distributed as dist
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from .registry import register_model
import numpy as np
from math import sqrt
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
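# A minimal shape sketch: window_partition followed by window_reverse is lossless when
# H and W are multiples of the window size (values are illustrative):
def _example_window_round_trip():
    x = torch.randn(2, 56, 56, 96)              # (B, H, W, C)
    windows = window_partition(x, 7)            # (2 * 8 * 8, 7, 7, 96) = (128, 7, 7, 96)
    y = window_reverse(windows, 7, 56, 56)      # back to (2, 56, 56, 96)
    return torch.equal(x, y)                    # True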
class WindowAttention(nn.Module):
r"""Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super(WindowAttention, self).__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2 Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn_out = attn
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn_out
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
@staticmethod
def compute_macs(module, input, output):
B, N, C = input[0].shape
module.__flops__ += module.flops(N) * B
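# A minimal shape sketch for WindowAttention over 7x7 windows (values are illustrative):
def _example_window_attention():
    attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
    tokens = torch.randn(128, 49, 96)           # (num_windows*B, N, C) with N = 7*7
    out, attn_map = attn(tokens)                # out: (128, 49, 96)
    # The relative position bias table holds (2*7 - 1)**2 = 169 entries per head.
    return out.shape, attn_map.shape            # ((128, 49, 96), (128, 3, 49, 49))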
class SwinTransformerBlock(nn.Module):
r"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.H = input_resolution[0]
self.W = input_resolution[1]
self.attn_mask_dict = {} # {self.H: self.create_attn_mask(self.H, self.W)}
# if self.shift_size > 0:
# # calculate attention mask for SW-MSA
# H, W = self.input_resolution
# img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
# img_mask[:, :-self.window_size, :-self.window_size, :] = 0
# img_mask[:, -self.shift_size:, -self.shift_size:, :] = 1
# img_mask[:, -self.shift_size:, :-self.window_size, :] = 2
# img_mask[:, -self.shift_size:, -self.window_size:-self.shift_size, :] = 3
# img_mask[:, :-self.window_size, -self.shift_size:, :] = 4
# img_mask[:, :-self.window_size, -self.window_size:-self.shift_size, :] = 5
# img_mask[:, -self.window_size:-self.shift_size, -self.shift_size:, :] = 6
# img_mask[:, -self.window_size:-self.shift_size, :-self.window_size, :] = 7
# img_mask[:, -self.window_size:-self.shift_size, -self.window_size:-self.shift_size, :] = 8
# mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
# mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
# attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
# attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
# else:
# attn_mask = None
# self.register_buffer("attn_mask", attn_mask)
def create_attn_mask(self, H, W):
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1)) # 1 Hp Wp 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
def forward(self, x):
B, L, C = x.shape
H = int(sqrt(L))
W = H
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            if H in self.attn_mask_dict:
                attn_mask = self.attn_mask_dict[H]
            else:
                self.attn_mask_dict[H] = self.create_attn_mask(H, W).to(x.device)
                attn_mask = self.attn_mask_dict[H]
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows, attn = self.attn(x_windows, attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size} mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
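# A minimal shape sketch for one shifted-window block on a 56x56 feature map with 96
# channels; the token sequence length must be a perfect square, since the forward pass
# recovers H = W = sqrt(L) (values are illustrative):
def _example_swin_block():
    blk = SwinTransformerBlock(dim=96, input_resolution=(56, 56), num_heads=3,
                               window_size=7, shift_size=3)
    x = torch.randn(2, 56 * 56, 96)
    out, attn = blk(x)
    return out.shape                            # (2, 3136, 96)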
class PatchMerging(nn.Module):
r"""Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
# def forward(self, x):
# """
# x: B, H*W, C
# """
# H, W = self.input_resolution
# B, L, C = x.shape
# # assert L == H * W, "input feature has wrong size"
# assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
# x = x.view(B, H, W, C)
# x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
# x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
# x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
# x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
# x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
# x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
# x = self.norm(x)
# x = self.reduction(x)
# return x
def forward(self, x):
""" Forward function.
Args:
            x: Input feature, tensor size (B, H*W, C). The spatial resolution is
                inferred inside this method as H = W = sqrt(L).
"""
B, L, C = x.shape
H = int(sqrt(L))
W = H
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
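# A minimal shape sketch for PatchMerging: each stage halves the spatial resolution and
# doubles the channel count, (B, H*W, C) -> (B, H/2 * W/2, 2*C) (values are illustrative):
def _example_patch_merging():
    merge = PatchMerging(input_resolution=(56, 56), dim=96)
    x = torch.randn(2, 56 * 56, 96)
    return merge(x).shape                       # (2, 784, 192)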
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
x, _ = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def forward_with_features(self, x):
fea = []
for blk in self.blocks:
x, _ = blk(x)
fea.append(x)
if self.downsample is not None:
x = self.downsample(x)
return x, fea
def forward_with_attention(self, x):
attns = []
for blk in self.blocks:
x, attn = blk(x)
attns.append(attn)
if self.downsample is not None:
x = self.downsample(x)
return x, attns
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# # FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
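# Shape note for the PatchEmbed above (illustrative, assuming the values this file
# passes in from SwinTransformer: img_size=224, patch_size=4, embed_dim=96):
# patches_resolution becomes [56, 56], num_patches is 3136, and forward() maps a
# (B, 3, 224, 224) image to a (B, 3136, 96) sequence of patch embeddings.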
# class PatchEmbed(nn.Module):
# """ Image to Patch Embedding
# """
# def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
# super().__init__()
# num_patches = (img_size // patch_size) * (img_size // patch_size)
# self.img_size = img_size
# self.patch_size = patch_size
# self.num_patches = num_patches
# self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
# def forward(self, x):
# B, C, H, W = x.shape
# x = self.proj(x).flatten(2).transpose(1, 2)
# return x
class SwinTransformer(nn.Module):
r""" Swin Transformer
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size.
patch_size (int | tuple(int)): Patch size.
in_chans (int): Number of input channels.
num_classes (int): Number of classes for classification head.
embed_dim (int): Embedding dimension.
depths (tuple(int)): Depth of Swin Transformer layers.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate.
drop_path_rate (float): Stochastic depth rate.
norm_layer (nn.Module): normalization layer.
ape (bool): If True, add absolute position embedding to the patch embedding.
patch_norm (bool): If True, add normalization after patch embedding.
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_dense_prediction=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Region prediction head
self.use_dense_prediction = use_dense_prediction
if self.use_dense_prediction: self.head_dense = None
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
# todo: to be implemented
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x_region = self.norm(x) # B L C
x = self.avgpool(x_region.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
if self.use_dense_prediction:
return x, x_region
else:
return x
def forward_feature_maps(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x_grid = self.norm(x) # B L C
x = self.avgpool(x_grid.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x, x_grid
def forward(self, x):
# convert to list
if not isinstance(x, list):
x = [x]
# Perform forward pass separately on each resolution input.
# The inputs corresponding to a single resolution are clubbed and single
# forward is run on the same resolution inputs. Hence we do several
# forward passes = number of different resolutions used. We then
# concatenate all the output features.
        # When the region-level prediction task is used, the network outputs four variables:
        # self.head(output_cls): view-level prob vector
        # self.head_dense(output_fea): region-level prob vector
# output_fea: region-level feature map (grid features)
# npatch: number of patches per view
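        # Illustrative example (hypothetical crop list): two 224x224 global crops
        # followed by six 96x96 local crops give idx_crops == tensor([2, 8]), so
        # forward_features() is called once on the batched global crops and once on
        # the batched local crops before the per-resolution outputs are concatenated.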
idx_crops = torch.cumsum(torch.unique_consecutive(
torch.tensor([inp.shape[-1] for inp in x]),
return_counts=True,
)[1], 0)
if self.use_dense_prediction:
start_idx = 0
for end_idx in idx_crops:
_out_cls, _out_fea = self.forward_features(torch.cat(x[start_idx: end_idx]))
B, N, C = _out_fea.shape
if start_idx == 0:
output_cls = _out_cls
output_fea = _out_fea.reshape(B * N, C)
npatch = [N]
else:
output_cls = torch.cat((output_cls, _out_cls))
output_fea = torch.cat((output_fea, _out_fea.reshape(B * N, C) ))
npatch.append(N)
start_idx = end_idx
return self.head(output_cls), self.head_dense(output_fea), output_fea, npatch
else:
start_idx = 0
for end_idx in idx_crops:
_out = self.forward_features(torch.cat(x[start_idx: end_idx]))
if start_idx == 0:
output = _out
else:
output = torch.cat((output, _out))
start_idx = end_idx
# Run the head forward on the concatenated features.
return self.head(output)
def forward_selfattention(self, x, n=1):
# n=1 return the last layer attn map; otherwise return attn maps in all layers
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
if n==1:
return self.forward_last_selfattention(x)
else:
return self.forward_all_selfattention(x)
def forward_last_selfattention(self, x):
for i, layer in enumerate(self.layers):
if i < len(self.layers) - 1:
x = layer(x)
else:
x, attns = layer.forward_with_attention(x)
return attns[-1]
def forward_all_selfattention(self, x):
attn_out = []
for layer in self.layers:
x, attns = layer.forward_with_attention(x)
attn_out += attns
return attn_out
def forward_return_n_last_blocks(self, x, n=1, return_patch_avgpool=False, depth=[]):
num_blks = sum(depth)
start_idx = num_blks - n
sum_cur = 0
for i, d in enumerate(depth):
sum_cur_new = sum_cur + d
if start_idx >= sum_cur and start_idx < sum_cur_new:
start_stage = i
start_blk = start_idx - sum_cur
sum_cur = sum_cur_new
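        # Worked example (hypothetical call): with depth=[2, 2, 6, 2] and n=4,
        # num_blks=12 and start_idx=8, so the loop above sets start_stage=2 and
        # start_blk=4, i.e. the last two blocks of stage 2 plus both blocks of
        # stage 3 contribute their averaged token features to the output below.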
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
# we will return the averaged token features from the `n` last blocks
# note: there is no [CLS] token in Swin Transformer
output = []
s = 0
for i, layer in enumerate(self.layers):
x, fea = layer.forward_with_features(x)
if i >= start_stage:
for x_ in fea[start_blk:]:
if i == len(self.layers)-1: # use the norm in the last stage
x_ = self.norm(x_)
x_avg = torch.flatten(self.avgpool(x_.transpose(1, 2)), 1) # B C
# print(f'Stage {i}, x_avg {x_avg.shape}')
output.append(x_avg)
start_blk = 0
return torch.cat(output, dim=-1)
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
if dist.get_rank() == 0:
print(f"GFLOPs layer_{i}: {layer.flops() / 1e9}")
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')
logging.info(f'=> loading pretrained model {pretrained}')
model_dict = self.state_dict()
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'
or 'relative_position_index' not in k
or 'attn_mask' not in k
)
if need_init:
if verbose:
logging.info(f'=> init {k} from {pretrained}')
if 'relative_position_bias_table' in k and v.size() != model_dict[k].size():
relative_position_bias_table_pretrained = v
relative_position_bias_table_current = model_dict[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
if nH1 != nH2:
logging.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logging.info(
'=> load_pretrained: resized variant: {} to {}'
.format((L1, nH1), (L2, nH2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2),
mode='bicubic')
v = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
if 'absolute_pos_embed' in k and v.size() != model_dict[k].size():
absolute_pos_embed_pretrained = v
absolute_pos_embed_current = model_dict[k]
_, L1, C1 = absolute_pos_embed_pretrained.size()
_, L2, C2 = absolute_pos_embed_current.size()
                        if C1 != C2:
logging.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logging.info(
'=> load_pretrained: resized variant: {} to {}'
.format((1, L1, C1), (1, L2, C2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
v = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1).flatten(1, 2)
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def freeze_pretrained_layers(self, frozen_layers=[]):
for name, module in self.named_modules():
if (
name.split('.')[0] in frozen_layers
or '.'.join(name.split('.')[0:2]) in frozen_layers
                or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
):
for _name, param in module.named_parameters():
param.requires_grad = False
logging.info(
'=> set param {} requires grad to False'
.format(name)
)
for name, param in self.named_parameters():
if (
name.split('.')[0] in frozen_layers
                or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
and param.requires_grad is True
):
param.requires_grad = False
logging.info(
'=> set param {} requires grad to False'
.format(name)
)
return self
@register_model
def get_cls_model(config, is_teacher=False, use_dense_prediction=False, **kwargs):
swin_spec = config.MODEL.SPEC
swin = SwinTransformer(
img_size=config.TRAIN.IMAGE_SIZE[0],
in_chans=3,
num_classes=config.MODEL.NUM_CLASSES,
patch_size=swin_spec['PATCH_SIZE'],
embed_dim=swin_spec['DIM_EMBED'],
depths=swin_spec['DEPTHS'],
num_heads=swin_spec['NUM_HEADS'],
window_size=swin_spec['WINDOW_SIZE'],
mlp_ratio=swin_spec['MLP_RATIO'],
qkv_bias=swin_spec['QKV_BIAS'],
drop_rate=swin_spec['DROP_RATE'],
attn_drop_rate=swin_spec['ATTN_DROP_RATE'],
drop_path_rate= 0.0 if is_teacher else swin_spec['DROP_PATH_RATE'],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
ape=swin_spec['USE_APE'],
patch_norm=swin_spec['PATCH_NORM'],
use_dense_prediction=use_dense_prediction,
)
if config.MODEL.INIT_WEIGHTS:
swin.init_weights(
config.MODEL.PRETRAINED,
config.MODEL.PRETRAINED_LAYERS,
config.VERBOSE
)
# freeze the specified pre-trained layers (if any)
if config.FINETUNE.FINETUNE:
swin.freeze_pretrained_layers(config.FINETUNE.FROZEN_LAYERS)
return swin
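# Usage sketch (hypothetical config values; the exact YAML schema comes from the
# surrounding training framework): a Swin-T style backbone could be requested with
# config.MODEL.SPEC = {'PATCH_SIZE': 4, 'DIM_EMBED': 96, 'DEPTHS': [2, 2, 6, 2],
# 'NUM_HEADS': [3, 6, 12, 24], 'WINDOW_SIZE': 7, 'MLP_RATIO': 4., 'QKV_BIAS': True,
# 'DROP_RATE': 0., 'ATTN_DROP_RATE': 0., 'DROP_PATH_RATE': 0.1, 'USE_APE': False,
# 'PATCH_NORM': True} and then built via get_cls_model(config, use_dense_prediction=True).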
| 39.62895
| 119
| 0.565362
|
c16c50e4d7f6ab448d8f8f9e9d196d9ba821e522
| 2,357
|
py
|
Python
|
applications/kit/models/logger.py
|
Ayunken/my_web2py
|
6f8ddf531b72ca5c3d6472a5ed3bd279b7be9260
|
[
"BSD-3-Clause"
] | null | null | null |
applications/kit/models/logger.py
|
Ayunken/my_web2py
|
6f8ddf531b72ca5c3d6472a5ed3bd279b7be9260
|
[
"BSD-3-Clause"
] | null | null | null |
applications/kit/models/logger.py
|
Ayunken/my_web2py
|
6f8ddf531b72ca5c3d6472a5ed3bd279b7be9260
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging, logging.handlers
from gluon import current
class GAEHandler(logging.Handler):
"""
Logging handler for GAE DataStore
"""
def emit(self, record):
from google.appengine.ext import db
class Log(db.Model):
name = db.StringProperty()
level = db.StringProperty()
module = db.StringProperty()
func_name = db.StringProperty()
line_no = db.IntegerProperty()
thread = db.IntegerProperty()
thread_name = db.StringProperty()
process = db.IntegerProperty()
message = db.StringProperty(multiline=True)
args = db.StringProperty(multiline=True)
date = db.DateTimeProperty(auto_now_add=True)
log = Log()
log.name = record.name
log.level = record.levelname
log.module = record.module
log.func_name = record.funcName
log.line_no = record.lineno
log.thread = record.thread
log.thread_name = record.threadName
log.process = record.process
log.message = record.msg
log.args = str(record.args)
log.put()
def get_configured_logger(name):
logger = logging.getLogger(name)
if (len(logger.handlers) == 0):
# This logger has no handlers, so we can assume it hasn't yet been configured
# (Configure logger)
# Create default handler
if request.env.web2py_runtime_gae:
# Create GAEHandler
handler = GAEHandler()
else:
# Create RotatingFileHandler
import os
formatter="%(asctime)s %(levelname)s %(process)s %(thread)s %(funcName)s():%(lineno)d %(message)s"
handler = logging.handlers.RotatingFileHandler(os.path.join(request.folder,'private/app.log'),maxBytes=10000000,backupCount=2)
handler.setFormatter(logging.Formatter(formatter))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Test entry:
logger.debug(name + ' logger created')
else:
# Test entry:
#logger.debug(name + ' already exists')
pass
return logger
# Assign application logger to a global var
logger = get_configured_logger(request.application)
current.logger=logger
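# Usage sketch: any controller in this app can now log through the shared handler,
# e.g. `from gluon import current; current.logger.info('request handled')`; entries
# go to private/app.log via the RotatingFileHandler unless running on GAE.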
| 33.197183
| 138
| 0.619431
|
3817cda08521634b3ce407846479800844c0ded3
| 1,235
|
py
|
Python
|
aws_lambda_powertools/utilities/parser/models/kinesis.py
|
whardier/aws-lambda-powertools-python
|
8de372914ade16f18a72484dbced8f4aba6de592
|
[
"Apache-2.0",
"MIT-0"
] | null | null | null |
aws_lambda_powertools/utilities/parser/models/kinesis.py
|
whardier/aws-lambda-powertools-python
|
8de372914ade16f18a72484dbced8f4aba6de592
|
[
"Apache-2.0",
"MIT-0"
] | null | null | null |
aws_lambda_powertools/utilities/parser/models/kinesis.py
|
whardier/aws-lambda-powertools-python
|
8de372914ade16f18a72484dbced8f4aba6de592
|
[
"Apache-2.0",
"MIT-0"
] | null | null | null |
import base64
import logging
from binascii import Error as BinAsciiError
from typing import List, Union
from pydantic import BaseModel, validator
from pydantic.types import PositiveInt
from aws_lambda_powertools.utilities.parser.types import Literal, Model
logger = logging.getLogger(__name__)
class KinesisDataStreamRecordPayload(BaseModel):
kinesisSchemaVersion: str
partitionKey: str
sequenceNumber: PositiveInt
data: Union[bytes, Model] # base64 encoded str is parsed into bytes
approximateArrivalTimestamp: float
@validator("data", pre=True, allow_reuse=True)
def data_base64_decode(cls, value):
try:
logger.debug("Decoding base64 Kinesis data record before parsing")
return base64.b64decode(value)
except (BinAsciiError, TypeError):
raise ValueError("base64 decode failed")
class KinesisDataStreamRecord(BaseModel):
eventSource: Literal["aws:kinesis"]
eventVersion: str
eventID: str
eventName: Literal["aws:kinesis:record"]
invokeIdentityArn: str
awsRegion: str
eventSourceARN: str
kinesis: KinesisDataStreamRecordPayload
class KinesisDataStreamModel(BaseModel):
Records: List[KinesisDataStreamRecord]
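# Usage sketch (hypothetical raw event): a handler can validate a Kinesis Lambda
# event with the parser utility, e.g.
#   from aws_lambda_powertools.utilities.parser import parse
#   model = parse(event=raw_event, model=KinesisDataStreamModel)
# after which model.Records[0].kinesis.data holds the base64-decoded bytes
# (or a nested Model instance when one is supplied for the `data` field).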
| 28.72093
| 78
| 0.752227
|
f964f4d479c5dda919491a22096f74a6bbf64fd8
| 15,580
|
py
|
Python
|
xknx/devices/climate_mode.py
|
phbaer/xknx
|
00b7cd4bda79d8bd6f8408a3596ab77a644b6c35
|
[
"MIT"
] | null | null | null |
xknx/devices/climate_mode.py
|
phbaer/xknx
|
00b7cd4bda79d8bd6f8408a3596ab77a644b6c35
|
[
"MIT"
] | null | null | null |
xknx/devices/climate_mode.py
|
phbaer/xknx
|
00b7cd4bda79d8bd6f8408a3596ab77a644b6c35
|
[
"MIT"
] | null | null | null |
"""
Module for managing the climate mode.
Climate modes can be 'auto', 'comfort', 'standby', 'economy' or 'protection'.
"""
from xknx.exceptions import CouldNotParseTelegram, DeviceIllegalValue
from xknx.knx import (
DPTArray, DPTBinary, DPTControllerStatus, DPTHVACContrMode, DPTHVACMode,
GroupAddress, HVACOperationMode)
from .device import Device
class ClimateMode(Device):
"""Class for managing the climate mode."""
# pylint: disable=invalid-name,too-many-instance-attributes
def __init__(self,
xknx,
name,
group_address_operation_mode=None,
group_address_operation_mode_state=None,
group_address_operation_mode_protection=None,
group_address_operation_mode_night=None,
group_address_operation_mode_comfort=None,
group_address_controller_status=None,
group_address_controller_status_state=None,
group_address_controller_mode=None,
group_address_controller_mode_state=None,
operation_modes=None,
device_updated_cb=None):
"""Initialize ClimateMode class."""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
super().__init__(xknx, name, device_updated_cb)
if isinstance(group_address_operation_mode, (str, int)):
group_address_operation_mode = GroupAddress(group_address_operation_mode)
if isinstance(group_address_operation_mode_state, (str, int)):
group_address_operation_mode_state = GroupAddress(group_address_operation_mode_state)
if isinstance(group_address_operation_mode_protection, (str, int)):
group_address_operation_mode_protection = GroupAddress(group_address_operation_mode_protection)
if isinstance(group_address_operation_mode_night, (str, int)):
group_address_operation_mode_night = GroupAddress(group_address_operation_mode_night)
if isinstance(group_address_operation_mode_comfort, (str, int)):
group_address_operation_mode_comfort = GroupAddress(group_address_operation_mode_comfort)
if isinstance(group_address_controller_status, (str, int)):
group_address_controller_status = GroupAddress(group_address_controller_status)
if isinstance(group_address_controller_status_state, (str, int)):
group_address_controller_status_state = GroupAddress(group_address_controller_status_state)
if isinstance(group_address_controller_mode, (str, int)):
group_address_controller_mode = GroupAddress(group_address_controller_mode)
if isinstance(group_address_controller_mode_state, (str, int)):
group_address_controller_mode_state = GroupAddress(group_address_controller_mode_state)
self.group_address_operation_mode = group_address_operation_mode
self.group_address_operation_mode_state = group_address_operation_mode_state
self.group_address_operation_mode_protection = group_address_operation_mode_protection
self.group_address_operation_mode_night = group_address_operation_mode_night
self.group_address_operation_mode_comfort = group_address_operation_mode_comfort
self.group_address_controller_status = group_address_controller_status
self.group_address_controller_status_state = group_address_controller_status_state
self.group_address_controller_mode = group_address_controller_mode
self.group_address_controller_mode_state = group_address_controller_mode_state
self.operation_mode = HVACOperationMode.STANDBY
self.operation_modes_ = []
if operation_modes is None:
self.operation_modes_ = self.guess_operation_modes()
else:
for mode in operation_modes:
if isinstance(mode, str):
self.operation_modes_.append(HVACOperationMode[mode])
elif isinstance(mode, HVACOperationMode):
self.operation_modes_.append(mode)
self.supports_operation_mode = \
group_address_operation_mode is not None or \
group_address_operation_mode_state is not None or \
group_address_operation_mode_protection is not None or \
group_address_operation_mode_night is not None or \
group_address_operation_mode_comfort is not None or \
group_address_controller_status is not None or \
group_address_controller_status_state is not None or \
group_address_controller_mode is not None or \
group_address_controller_mode_state is not None
@classmethod
def from_config(cls, xknx, name, config):
"""Initialize object from configuration structure."""
# pylint: disable=too-many-locals
group_address_operation_mode = \
config.get('group_address_operation_mode')
group_address_operation_mode_state = \
config.get('group_address_operation_mode_state')
group_address_operation_mode_protection = \
config.get('group_address_operation_mode_protection')
group_address_operation_mode_night = \
config.get('group_address_operation_mode_night')
group_address_operation_mode_comfort = \
config.get('group_address_operation_mode_comfort')
group_address_controller_status = \
config.get('group_address_controller_status')
group_address_controller_status_state = \
config.get('group_address_controller_status_state')
group_address_controller_mode = \
config.get('group_address_controller_mode')
group_address_controller_mode_state = \
config.get('group_address_controller_mode_state')
return cls(xknx,
name,
group_address_operation_mode=group_address_operation_mode,
group_address_operation_mode_state=group_address_operation_mode_state,
group_address_operation_mode_protection=group_address_operation_mode_protection,
group_address_operation_mode_night=group_address_operation_mode_night,
group_address_operation_mode_comfort=group_address_operation_mode_comfort,
group_address_controller_status=group_address_controller_status,
group_address_controller_status_state=group_address_controller_status_state,
group_address_controller_mode=group_address_controller_mode,
group_address_controller_mode_state=group_address_controller_mode_state)
def has_group_address(self, group_address):
"""Test if device has given group address."""
return group_address in \
[self.group_address_operation_mode,
self.group_address_operation_mode_state,
self.group_address_operation_mode_protection,
self.group_address_operation_mode_night,
self.group_address_operation_mode_comfort,
self.group_address_controller_status,
self.group_address_controller_status_state,
self.group_address_controller_mode,
self.group_address_controller_mode_state]
async def _set_internal_operation_mode(self, operation_mode):
"""Set internal value of operation mode. Call hooks if operation mode was changed."""
if operation_mode != self.operation_mode:
self.operation_mode = operation_mode
await self.after_update()
async def set_operation_mode(self, operation_mode):
"""Set the operation mode of a thermostat. Send new operation_mode to BUS and update internal state."""
if not self.supports_operation_mode:
raise DeviceIllegalValue("operation mode not supported", operation_mode)
if self.group_address_operation_mode is not None:
await self.send(
self.group_address_operation_mode,
DPTArray(DPTHVACMode.to_knx(operation_mode)))
if self.group_address_operation_mode_protection is not None:
protection_mode = operation_mode == HVACOperationMode.FROST_PROTECTION
await self.send(
self.group_address_operation_mode_protection,
DPTBinary(protection_mode))
if self.group_address_operation_mode_night is not None:
night_mode = operation_mode == HVACOperationMode.NIGHT
await self.send(
self.group_address_operation_mode_night,
DPTBinary(night_mode))
if self.group_address_operation_mode_comfort is not None:
comfort_mode = operation_mode == HVACOperationMode.COMFORT
await self.send(
self.group_address_operation_mode_comfort,
DPTBinary(comfort_mode))
if self.group_address_controller_status is not None:
await self.send(
self.group_address_controller_status,
DPTArray(DPTControllerStatus.to_knx(operation_mode)))
if self.group_address_controller_mode is not None:
await self.send(
self.group_address_controller_mode,
DPTArray(DPTHVACContrMode.to_knx(operation_mode)))
await self._set_internal_operation_mode(operation_mode)
@property
def operation_modes(self):
"""Return all configured operation modes."""
if not self.supports_operation_mode:
return []
return self.operation_modes_
def guess_operation_modes(self):
"""Guess operation modes from group addresses."""
# All operation modes supported
if self.group_address_operation_mode is not None:
return [HVACOperationMode.AUTO, HVACOperationMode.COMFORT,
HVACOperationMode.STANDBY, HVACOperationMode.NIGHT,
HVACOperationMode.FROST_PROTECTION]
if self.group_address_controller_status is not None:
return [HVACOperationMode.COMFORT, HVACOperationMode.STANDBY,
HVACOperationMode.NIGHT, HVACOperationMode.FROST_PROTECTION]
if self.group_address_controller_mode is not None:
return [HVACOperationMode.STANDBY, HVACOperationMode.AUTO, HVACOperationMode.HEAT,
HVACOperationMode.COOL, HVACOperationMode.FAN_ONLY, HVACOperationMode.DRY]
# Operation modes only supported partially
operation_modes = []
if self.group_address_operation_mode_comfort:
operation_modes.append(HVACOperationMode.COMFORT)
operation_modes.append(HVACOperationMode.STANDBY)
if self.group_address_operation_mode_night:
operation_modes.append(HVACOperationMode.NIGHT)
if self.group_address_operation_mode_protection:
operation_modes.append(HVACOperationMode.FROST_PROTECTION)
return operation_modes
async def process_group_write(self, telegram):
"""Process incoming GROUP WRITE telegram."""
if self.supports_operation_mode and \
telegram.group_address == self.group_address_operation_mode or \
telegram.group_address == self.group_address_operation_mode_state:
await self._process_operation_mode(telegram)
elif self.supports_operation_mode and \
telegram.group_address == self.group_address_controller_mode or \
telegram.group_address == self.group_address_controller_mode_state:
await self._process_controller_mode(telegram)
elif self.supports_operation_mode and \
telegram.group_address == self.group_address_controller_status or \
telegram.group_address == self.group_address_controller_status_state:
await self._process_controller_status(telegram)
        # Note: telegrams setting split-up operation modes are not yet implemented
async def _process_operation_mode(self, telegram):
"""Process incoming telegram for operation mode."""
if not isinstance(telegram.payload, DPTArray) \
or len(telegram.payload.value) != 1:
raise CouldNotParseTelegram("invalid payload", payload=telegram.payload, device_name=self.name)
operation_mode = DPTHVACMode.from_knx(telegram.payload.value)
await self._set_internal_operation_mode(operation_mode)
async def _process_controller_mode(self, telegram):
"""Process incoming telegram for controller mode."""
if not isinstance(telegram.payload, DPTArray) \
or len(telegram.payload.value) != 1:
raise CouldNotParseTelegram("invalid payload", payload=telegram.payload, device_name=self.name)
operation_mode = DPTHVACContrMode.from_knx(telegram.payload.value)
await self._set_internal_operation_mode(operation_mode)
async def _process_controller_status(self, telegram):
"""Process incoming telegram for controller status."""
if not isinstance(telegram.payload, DPTArray) \
or len(telegram.payload.value) != 1:
raise CouldNotParseTelegram("invalid payload", payload=telegram.payload, device_name=self.name)
operation_mode = DPTControllerStatus.from_knx(telegram.payload.value)
await self._set_internal_operation_mode(operation_mode)
def state_addresses(self):
"""Return group addresses which should be requested to sync state."""
state_addresses = []
if self.supports_operation_mode:
if self.group_address_operation_mode_state:
state_addresses.append(self.group_address_operation_mode_state)
elif self.group_address_operation_mode:
state_addresses.append(self.group_address_operation_mode)
if self.group_address_controller_status_state:
state_addresses.append(self.group_address_controller_status_state)
elif self.group_address_controller_status:
state_addresses.append(self.group_address_controller_status)
if self.group_address_controller_mode_state:
state_addresses.append(self.group_address_controller_mode_state)
elif self.group_address_controller_mode:
state_addresses.append(self.group_address_controller_mode)
        # Note: telegrams setting split-up operation modes are not yet implemented
return state_addresses
def __str__(self):
"""Return object as readable string."""
return '<ClimateMode name="{0}" ' \
'group_address_operation_mode="{1}" ' \
'group_address_operation_mode_state="{2}" ' \
'group_address_controller_status="{3}" ' \
'group_address_controller_status_state="{4}" ' \
'group_address_controller_mode="{5}" ' \
'group_address_controller_mode_state="{6}" ' \
'/>' \
.format(
self.name,
self.group_address_operation_mode.__repr__(),
self.group_address_operation_mode_state.__repr__(),
self.group_address_controller_status.__repr__(),
self.group_address_controller_status_state.__repr__(),
self.group_address_controller_mode.__repr__(),
self.group_address_controller_mode_state.__repr__())
def __eq__(self, other):
"""Equal operator."""
return self.__dict__ == other.__dict__
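# Usage sketch (hypothetical group address): a mode object created with
#   climate_mode = ClimateMode(xknx, 'Kitchen.Mode', group_address_operation_mode='1/7/6')
# guesses its supported modes from the configured addresses and can then switch the
# thermostat with `await climate_mode.set_operation_mode(HVACOperationMode.COMFORT)`.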
| 53.910035
| 111
| 0.702632
|
1e08a75240a359a1c1c2c1988534a3320c69ce2c
| 787
|
py
|
Python
|
core/api/migrations/0007_tags.py
|
LegolasVzla/django-google-maps
|
62efd2f35f8ee9fcdd5c3b9d3b0aba934b1d6fb3
|
[
"MIT"
] | 5
|
2020-06-15T10:11:33.000Z
|
2022-01-05T19:03:52.000Z
|
backend/everpro/django-google-maps/core/api/migrations/0007_tags.py
|
Ascensiony/EverPro-Intelligence-APIs
|
41de67418a7ed266547840948301225220ddd6c9
|
[
"Apache-2.0"
] | 9
|
2020-02-12T00:54:30.000Z
|
2021-09-22T17:55:59.000Z
|
backend/everpro/django-google-maps/core/api/migrations/0007_tags.py
|
Ascensiony/EverPro-Intelligence-APIs
|
41de67418a7ed266547840948301225220ddd6c9
|
[
"Apache-2.0"
] | 6
|
2020-06-18T09:14:35.000Z
|
2021-10-16T10:00:13.000Z
|
# Generated by Django 2.2.3 on 2019-08-01 03:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_spots_postal_code'),
]
operations = [
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('is_deleted', models.BooleanField(default=False)),
('updated_date', models.DateTimeField(auto_now=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
],
),
]
| 31.48
| 114
| 0.583227
|
875008626982638d5fdfc701f011c21c62054103
| 1,312
|
py
|
Python
|
setup.py
|
jmatuskey/exoctk
|
bfd7e5100014048f73baf23c964598381f691ffd
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jmatuskey/exoctk
|
bfd7e5100014048f73baf23c964598381f691ffd
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jmatuskey/exoctk
|
bfd7e5100014048f73baf23c964598381f691ffd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
REQUIRES = ['numpy',
'asteval',
'astropy',
'astroquery',
'batman-package',
'bibtexparser',
'bokeh',
'cython',
'flask',
'h5py',
'lmfit',
'matplotlib',
'numba',
'pandas',
'pysynphot',
'scipy',
'sphinx',
'svo_filters']
SETUP_REQUIRES = ['numpy']
setup(name='exoctk',
version='0.2.2',
description='Observation reduction and planning tools for exoplanet science',
cmdclass={'build_ext': build_ext},
setup_requires=SETUP_REQUIRES,
install_requires=REQUIRES,
author='The ExoCTK Group',
author_email='exoctk@gmail.com',
license='MIT',
url='https://github.com/ExoCTK/exoctk',
long_description='',
zip_safe=True,
use_2to3=False
)
| 26.77551
| 83
| 0.577744
|
51e0a7f9fc589479293e716e14def5b0a9cd403b
| 5,199
|
py
|
Python
|
tools/nntool/quantization/symmetric/kernels/linear.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/quantization/symmetric/kernels/linear.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/quantization/symmetric/kernels/linear.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from graph.types.linear import FcParameters
from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type
from quantization.multiplicative.mulbias import (apply_multiplicative_bias,
apply_zero_offset_bias)
from quantization.new_qrec import QRec
LOG = logging.getLogger("nntool." + __name__)
@params_type(FcParameters)
@qrec_type('symmetric', 'scaled', 'scaled_ne16')
class LinearSymmetric(KernelBase):
@classmethod
def execute(cls, params,
in_tensors,
qrec: QRec,
**kwargs):
details = kwargs.get('details')
in_dims, out_dims = tuple(dims[0] for dims in cls.calc_transposed_dims(params))
prepared_in_tensors = qrec.prepare_inputs(
params, in_tensors, ktype="symmetric")
prepared_in_tensors = apply_zero_offset_bias(
qrec, params, prepared_in_tensors, ktype="symmetric")
in_tensor = prepared_in_tensors[0]
# expand the weights to apply the zero offset
weights = prepared_in_tensors[1].astype(np.int32) - qrec.in_qs[1].zero_point
biases = prepared_in_tensors[2]
if details is not None:
details['min_acc'] = float("Infinity")
details['max_acc'] = float("-Infinity")
acc_q = qrec.cache.get('acc_q') or qrec.in_qs[2]
calc_q = qrec.cache.get('calc_q') or qrec.in_qs[2]
if params.has_bias:
# move biases to accumulator dtype and Q
acc_tensor = biases.copy().astype(acc_q.dtype)
if acc_q != qrec.in_qs[2]:
acc_tensor = acc_q.expand_from(acc_tensor, qrec.in_qs[2])
else:
acc_tensor = np.zeros(out_dims.shape,
dtype=acc_q.dtype)
if params.batch_size > 1:
in_tensor = in_tensor.reshape(
(params.batch_size, in_dims.size()//params.batch_size)).astype(calc_q.dtype)
acc_tensor = calc_q.expand_from(acc_tensor, acc_q)
# weights will already be transposed at import
acc_tensor = np.dot(in_tensor, weights.astype(calc_q.dtype)) + acc_tensor
details['min_acc'] = np.min(acc_tensor)
details['max_acc'] = np.max(acc_tensor)
acc_tensor = acc_q.reduce_from(acc_tensor, calc_q)
acc_tensor = apply_multiplicative_bias(qrec,
params, acc_tensor, 1, ktype="symmetric")
if params.batch_minor:
acc_tensor = acc_tensor.transpose(1, 0)
else:
# force the bit dimension of the input tensor to the bit width of the calc
# so that the dot product occurs in this precision
in_tensor = in_tensor.astype(calc_q.dtype)
in_tensor = in_tensor.reshape((in_dims.size()))
filt = params.filter.get_filter_dims()
for out_c in range(out_dims.c):
# Expand and normalize the accumulator
if calc_q != acc_q:
acc_tensor = calc_q.expand_from(acc_tensor, acc_q)
w_slice = weights[filt.srange(out_c=out_c)].reshape(
(in_dims.size()))
res = np.dot(in_tensor, w_slice)
if details is not None:
details['min_acc'] = min(
np.sum(res[res < 0]), details['min_acc'])
                    details['max_acc'] = max(
                        np.sum(res[res > 0]), details['max_acc'])
acc_tensor[out_c] += res
if calc_q != acc_q:
acc_tensor = acc_q.reduce_from(acc_tensor, calc_q)
if details is not None:
details['min_acc'] = min(
np.min(acc_tensor[out_c]), details['min_acc'])
details['max_acc'] = max(
np.max(acc_tensor[out_c]), details['max_acc'])
# details['acc_before'] = acc_tensor.copy()
acc_tensor = apply_multiplicative_bias(qrec,
params, acc_tensor, 0, ktype="symmetric")
# details['acc_after'] = acc_tensor.copy()
out_q = qrec.out_qs[0]
if qrec and out_q != acc_q:
acc_tensor = out_q.reduce_from(acc_tensor, acc_q, allow_zero_adjust=True)
return qrec.get_outputs(params, [acc_tensor], ktype="symmetric")
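# Computation sketch for the kernel above: both the batched path and the
# per-output-channel path evaluate acc = W.x + bias in the wider calc_q precision,
# reduce the accumulator back to acc_q, apply the per-channel multiplicative bias
# (scaling), and requantize to the output qrec (when it differs) before returning
# through qrec.get_outputs().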
| 42.966942
| 92
| 0.602808
|
5265ff35f08f9f4242fd6e3c3a2c3dc880296ee0
| 4,166
|
py
|
Python
|
broadlinky/__init__.py
|
technicalpickles/broadlinky
|
e16310c5429ecff00834982931d9daacc3f38d57
|
[
"MIT"
] | 1
|
2021-09-17T00:16:56.000Z
|
2021-09-17T00:16:56.000Z
|
broadlinky/__init__.py
|
technicalpickles/broadlinky
|
e16310c5429ecff00834982931d9daacc3f38d57
|
[
"MIT"
] | null | null | null |
broadlinky/__init__.py
|
technicalpickles/broadlinky
|
e16310c5429ecff00834982931d9daacc3f38d57
|
[
"MIT"
] | null | null | null |
"""Interface for discovering/sending codes with a Broadlink device."""
import logging
import os
import re
import socket
import time
import broadlink
import yaml
_LOGGER = logging.getLogger(__name__)
class Device:
def __init__(self, broadlinky, name, state_config):
self.broadlinky = broadlinky
self.name = name
self.state_config = state_config
# TODO check mqtt state instead?
self.states = {'power': 'OFF'}
def turn_on(self):
return self.set_state('power', 'on')
def turn_off(self):
return self.set_state('power', 'off')
def set_state(self, state, value):
value = value.lower()
new_state = None
state_config = self.state_config[state]
toggle = state_config.get('toggle', False)
if value == 'on':
if toggle:
packet = toggle
else:
packet = state_config[True]
new_state = 'ON'
elif value == 'off':
if toggle:
packet = toggle
else:
if state_config.get('toggle', False):
packet = state_config[True]
else:
packet = state_config[False]
# FIXME how to handle drift?
new_state = 'OFF'
elif re.search(r"^\d+$", value):
packet = state_config[int(value)]
new_state = value
else:
packet = state_config[value]
new_state = value
self.broadlinky.send_data(packet)
if new_state is not None:
self.states[state] = new_state
return new_state
def remember_state_value_packet(self, state, value, packet):
if state not in self.state_config:
self.state_config[state] = {}
if value == 'on':
value = True
elif value == 'off':
value = False
self.state_config[state][value] = packet
self.broadlinky.save()
class Broadlinky:
"""Interface for grouping IR/RF packets into logical devices."""
def __init__(self, devices_path=None):
if devices_path is None:
devices_path = os.path.dirname(os.path.abspath(__file__)) + '/../devices.yaml'
self.devices_path = devices_path
with open(devices_path, 'r') as file:
            self.devices_data = yaml.safe_load(file)
# TODO handle multiples?
broadlinks = broadlink.discover(timeout=5)
self.broadlink = broadlinks[0]
self.broadlink.auth()
self.last_learned_packet = None
self.devices = dict((name, Device(self, name, device_commands))
for name, device_commands in self.devices_data.items())
def get_device(self, device_name):
if device_name not in self.devices:
device = Device(self, device_name, {})
self.devices[device_name] = device
self.devices_data[device_name] = device.state_config
return self.devices[device_name]
# TODO timeout argument?
def learn(self):
"""Learn an IR or RF packet for the device."""
print("Learning", end="", flush=True)
packet = None
self.broadlink.enter_learning()
while packet is None or packet == self.last_learned_packet:
print(".", end="", flush=True)
time.sleep(1)
packet = self.broadlink.check_data()
print(flush=True)
return packet
def save(self):
with open(self.devices_path, "w") as devices_file:
# TODO preserve comments?
yaml.dump(self.devices_data, devices_file)
def send_data(self, packet, retry=2):
if packet is None:
_LOGGER.debug("Empty packet.")
return True
try:
self.broadlink.send_data(packet)
except socket.timeout as error:
if retry < 1:
_LOGGER.error(error)
return False
try:
self.broadlink.auth()
except socket.timeout:
pass
return self.send_data(packet, max(0, retry-1))
return True
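# Usage sketch (hypothetical device/state names, assuming a devices.yaml is present
# and a Broadlink hub is discoverable on the network):
#   broadlinky = Broadlinky()
#   tv = broadlinky.get_device('living_room_tv')
#   tv.remember_state_value_packet('power', 'on', broadlinky.learn())
#   tv.turn_on()
# learn() captures an IR/RF packet, remember_state_value_packet() persists it to
# devices.yaml via save(), and turn_on() replays it through send_data().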
| 29.546099
| 90
| 0.571771
|
33148d5683b8950371169773cbaa4a36b85dc0e6
| 468
|
pyde
|
Python
|
sketches/box01/box01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4
|
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/box01/box01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/box01/box01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3
|
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
a = 0
def setup():
global chest
earth = loadImage("bluemarble.jpg")
size(400, 400, P3D)
noStroke()
chest = createShape(BOX, 180)
chest.setTexture(earth)
def draw():
global a, chest
background(51)
lights()
translate(width*.5, height*.5, 0)
sphereDetail(30)
with pushMatrix():
rotateZ(radians(frameCount))
rotateX(radians(frameCount*.5))
rotateY(radians(a))
a += 0.01
shape(chest)
| 21.272727
| 39
| 0.59188
|
ad39aea28fbab558d3f7a13aa0eec00152045cff
| 1,206
|
py
|
Python
|
migrations/versions/55e0ccaf0fd6_.py
|
AshiSaxena996/flask_project_template-master
|
109f0225ad32fcec2ad19648c5fef22b495de0c6
|
[
"MIT"
] | null | null | null |
migrations/versions/55e0ccaf0fd6_.py
|
AshiSaxena996/flask_project_template-master
|
109f0225ad32fcec2ad19648c5fef22b495de0c6
|
[
"MIT"
] | null | null | null |
migrations/versions/55e0ccaf0fd6_.py
|
AshiSaxena996/flask_project_template-master
|
109f0225ad32fcec2ad19648c5fef22b495de0c6
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 55e0ccaf0fd6
Revises: 2d86810e63ae
Create Date: 2020-05-12 14:46:45.343845
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '55e0ccaf0fd6'
down_revision = '2d86810e63ae'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('rating',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('rest_id', sa.Integer(), nullable=True),
sa.Column('rating', sa.Integer(), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('date_posted', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['rest_id'], ['restaurant.id'], ),
sa.ForeignKeyConstraint(['username'], ['user.username'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_rating_date_posted'), 'rating', ['date_posted'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_rating_date_posted'), table_name='rating')
op.drop_table('rating')
# ### end Alembic commands ###
| 30.15
| 91
| 0.682421
|
66c1027a03eb056d0770bcfba2b78ddc4d796bbe
| 2,534
|
py
|
Python
|
compute_incident_rates.py
|
ramanshahdatascience/interval_sorting_demo
|
fc4d9cc88ce1b79902430ccd930c0902010afbf0
|
[
"BSD-3-Clause"
] | 1
|
2020-01-14T15:40:42.000Z
|
2020-01-14T15:40:42.000Z
|
compute_incident_rates.py
|
ramanshahdatascience/interval_sorting_demo
|
fc4d9cc88ce1b79902430ccd930c0902010afbf0
|
[
"BSD-3-Clause"
] | 2
|
2020-04-30T20:45:03.000Z
|
2021-10-19T17:48:21.000Z
|
compute_incident_rates.py
|
ramanshahdatascience/interval_sorting_demo
|
fc4d9cc88ce1b79902430ccd930c0902010afbf0
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
import pandas as pd
from scipy.optimize import fmin, fsolve
from scipy.stats import beta
import warnings
# These parameters give a mode at the national overdose death rate of 14.3 per
# 100k per year. They offer a reasonably broad distribution with a 95% highest
# density interval of [1.896e-4, 198] per 100k per year, allowing for
# signficant multiples in either direction of the national average without
# departing from exact math.
BETA_A = 1.25
BETA_B = 1750
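# Sanity check on the prior (short derivation, not executed): the mode of a
# Beta(a, b) density is (a - 1) / (a + b - 2), so a=1.25, b=1750 gives
# 0.25 / 1749.25 ~= 1.43e-4, i.e. about 14.3 per 100k per year as stated above.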
def impute_incidents(incidents_string):
if incidents_string == '<5':
return 2.5
else:
return float(incidents_string)
def interval_endpoint(row, cred_mass=0.9):
'''Construct highest-posterior-density credible interval for the event rate.'''
# Plug an imputed 2.5 events into the towns with <5 events
incidents = impute_incidents(row['incidents'])
population = row['population']
iw = make_interval_width(incidents, population, cred_mass=cred_mass)
left_mass = fmin(iw, 1.0 - cred_mass, ftol=1e-8, disp=False)[0]
return (beta.ppf(left_mass, BETA_A + incidents, BETA_B + population),
beta.ppf(left_mass + cred_mass, BETA_A + incidents,
BETA_B + population))
def make_interval_width(incidents, population, cred_mass=0.9):
def interval_width(left_mass):
return beta.ppf(left_mass + cred_mass, BETA_A + incidents, BETA_B + population) \
- beta.ppf(left_mass, BETA_A + incidents, BETA_B + population)
return interval_width
# The SciPy function solvers and minimizers can throw off RuntimeWarnings in
# typical use.
warnings.simplefilter('ignore', category=RuntimeWarning)
incidents = pd.read_csv('transformed/incidents.csv', index_col='municipality')
populations = pd.read_csv('transformed/populations.csv', index_col='municipality')
result = incidents.join(populations)
result['imputed_incidents'] = result['incidents'].apply(impute_incidents)
result['incidents_per_100k'] = 1e5 * result['imputed_incidents'] / result['population']
result['left_endpoint'], result['right_endpoint'] \
= zip(*result.apply(interval_endpoint, axis=1))
result['left_per_100k'] = 1e5 * result['left_endpoint']
result['right_per_100k'] = 1e5 * result['right_endpoint']
left_rank = result['left_per_100k'].rank(method='max', pct=True)
right_rank = result['right_per_100k'].rank(method='max', pct=True)
result['score'] = right_rank + 0.5 * left_rank * left_rank \
- 0.5 * right_rank * right_rank
result.to_csv('./transformed/incident_rates.csv')
| 41.540984
| 89
| 0.733623
|
96e69e7e7cf963c6ef16fce3dae67e2d3ac77d8f
| 10,127
|
py
|
Python
|
contrib/linearize/linearize-data.py
|
wkibbler/nyc3
|
cf4ffcbd4c1561d7a21c2f30cad94a0f783d74b5
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-data.py
|
wkibbler/nyc3
|
cf4ffcbd4c1561d7a21c2f30cad94a0f783d74b5
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-data.py
|
wkibbler/nyc3
|
cf4ffcbd4c1561d7a21c2f30cad94a0f783d74b5
|
[
"MIT"
] | 1
|
2019-07-21T20:59:27.000Z
|
2019-07-21T20:59:27.000Z
|
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Nyc3 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
def uint32(x):
return x & 0xffffffff
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return b''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hexlify(hash).decode('utf-8')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = hex_switchEndian(line)
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
			if (not inhdr or (inhdr[0] == 0)):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
if not self.hash_str in blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
# may encounter blocks it doesn't know about. Treat as debug output.
if settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
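# Example CONFIG-FILE contents (illustrative paths; every key mirrors a setting
# handled above):
#   netmagic=f9beb4d9
#   genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
#   input=/home/example/.bitcoin/blocks
#   hashlist=hashlist.txt
#   output_file=/home/example/bootstrap.dat
#   max_out_sz=1000000000
#   rev_hash_bytes=false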
| 31.256173 | 108 | 0.693789 |
8ce7819d3e0901ecb4a9b798e1d4169a13496488 | 34,315 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/purestorage/flashblade/plugins/modules/purefb_fs.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: purefb_fs
version_added: "1.0.0"
short_description: Manage filesystems on Pure Storage FlashBlade
description:
- This module manages filesystems on Pure Storage FlashBlade.
author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
name:
description:
- Filesystem Name.
required: true
type: str
state:
description:
      - Create, delete or modify a filesystem.
required: false
default: present
type: str
choices: [ "present", "absent" ]
eradicate:
description:
- Define whether to eradicate the filesystem on delete or leave in trash.
required: false
type: bool
default: false
size:
description:
- Volume size in M, G, T or P units. See examples.
- If size is not set at filesystem creation time the filesystem size becomes unlimited.
type: str
required: false
nfsv3:
description:
      - Define whether the NFSv3 protocol is enabled for the filesystem.
required: false
type: bool
default: true
nfsv4:
description:
      - Define whether the NFSv4.1 protocol is enabled for the filesystem.
required: false
type: bool
default: true
nfs_rules:
description:
- Define the NFS rules in operation.
- If not set at filesystem creation time it defaults to I(*(rw,no_root_squash))
- Supported binary options are ro/rw, secure/insecure, fileid_32bit/no_fileid_32bit,
root_squash/no_root_squash, all_squash/no_all_squash and atime/noatime
- Supported non-binary options are anonuid=#, anongid=#, sec=(sys|krb5)
required: false
type: str
smb:
description:
      - Define whether the SMB protocol is enabled for the filesystem.
required: false
type: bool
default: false
smb_aclmode:
description:
- Specify the ACL mode for the SMB protocol.
- Deprecated from Purity//FB 3.1.1. Use I(access_control) instead.
required: false
type: str
default: shared
choices: [ "shared", "native" ]
http:
description:
      - Define whether the HTTP/HTTPS protocol is enabled for the filesystem.
required: false
type: bool
default: false
snapshot:
description:
- Define whether a snapshot directory is enabled for the filesystem.
required: false
type: bool
default: false
writable:
description:
      - Define if a filesystem is writable.
required: false
type: bool
promote:
description:
- Promote/demote a filesystem.
- Can only demote the file-system if it is in a replica-link relationship.
required: false
type: bool
fastremove:
description:
- Define whether the fast remove directory is enabled for the filesystem.
required: false
type: bool
default: false
hard_limit:
description:
- Define whether the capacity for a filesystem is a hard limit.
- CAUTION This will cause the filesystem to go Read-Only if the
capacity has already exceeded the logical size of the filesystem.
required: false
type: bool
default: false
user_quota:
description:
- Default quota in M, G, T or P units for a user under this file system.
required: false
type: str
group_quota:
description:
- Default quota in M, G, T or P units for a group under this file system.
required: false
type: str
policy:
description:
- Filesystem policy to assign to or remove from a filesystem.
required: false
type: str
policy_state:
description:
- Add or delete a policy from a filesystem
required: false
default: present
type: str
choices: [ "absent", "present" ]
delete_link:
description:
- Define if the filesystem can be deleted even if it has a replica link
required: false
default: false
type: bool
discard_snaps:
description:
- Allow a filesystem to be demoted.
required: false
default: false
type: bool
access_control:
description:
- The access control style that is utilized for client actions such
as setting file and directory ACLs.
- Only available from Purity//FB 3.1.1
type: str
default: shared
choices: [ 'nfs', 'smb', 'shared', 'independent', 'mode-bits' ]
safeguard_acls:
description:
- Safeguards ACLs on a filesystem.
- Performs different roles depending on the filesystem protocol enabled.
- See Purity//FB documentation for detailed description.
- Only available from Purity//FB 3.1.1
type: bool
default: True
extends_documentation_fragment:
- purestorage.flashblade.purestorage.fb
"""
EXAMPLES = """
- name: Create new filesystem named foo
purefb_fs:
name: foo
size: 1T
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Delete filesystem named foo
purefb_fs:
name: foo
state: absent
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Recover filesystem named foo
purefb_fs:
name: foo
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Eradicate filesystem named foo
purefb_fs:
name: foo
state: absent
eradicate: true
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Promote filesystem named foo ready for failover
purefb_fs:
name: foo
promote: true
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Demote filesystem named foo after failover
purefb_fs:
name: foo
promote: false
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Modify attributes of an existing filesystem named foo
purefb_fs:
name: foo
size: 2T
nfsv3 : false
nfsv4 : true
user_quota: 10K
group_quota: 25M
nfs_rules: '10.21.200.0/24(ro)'
snapshot: true
fastremove: true
hard_limit: true
smb: true
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641"""
RETURN = """
"""
HAS_PURITY_FB = True
try:
from purity_fb import (
FileSystem,
ProtocolRule,
NfsRule,
SmbRule,
MultiProtocolRule,
rest,
)
except ImportError:
HAS_PURITY_FB = False
HAS_JSON = True
try:
import json
except ImportError:
HAS_JSON = False
from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible_collections.purestorage.flashblade.plugins.module_utils.purefb import (
get_blade,
purefb_argument_spec,
)
HARD_LIMIT_API_VERSION = "1.4"
NFSV4_API_VERSION = "1.6"
REPLICATION_API_VERSION = "1.9"
MULTIPROTOCOL_API_VERSION = "1.11"
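# Note: size/quota strings such as "1T" or "25M" from the task parameters are
# converted to raw byte counts with Ansible's human_to_bytes helper (imported
# above) before being passed to the FlashBlade API. A rough illustration,
# assuming the 1024-based units Ansible uses:
#
#   human_to_bytes("25M")  # -> 26214400
#   human_to_bytes("1T")   # -> 1099511627776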
def get_fs(module, blade):
"""Return Filesystem or None"""
fsys = []
fsys.append(module.params["name"])
try:
res = blade.file_systems.list_file_systems(names=fsys)
return res.items[0]
except Exception:
return None
def create_fs(module, blade):
"""Create Filesystem"""
changed = True
if not module.check_mode:
try:
if not module.params["nfs_rules"]:
module.params["nfs_rules"] = "*(rw,no_root_squash)"
if module.params["size"]:
size = human_to_bytes(module.params["size"])
else:
size = 0
if module.params["user_quota"]:
user_quota = human_to_bytes(module.params["user_quota"])
else:
user_quota = None
if module.params["group_quota"]:
group_quota = human_to_bytes(module.params["group_quota"])
else:
group_quota = None
api_version = blade.api_version.list_versions().versions
if HARD_LIMIT_API_VERSION in api_version:
if NFSV4_API_VERSION in api_version:
if REPLICATION_API_VERSION in api_version:
if MULTIPROTOCOL_API_VERSION in api_version:
if module.params["access_control"] == "nfs" and not (
module.params["nfsv3"] or module.params["nfsv4"]
):
module.fail_json(
msg="Cannot set access_control to nfs when NFS is not enabled."
)
if (
module.params["access_control"]
in ["smb", "independent"]
and not module.params["smb"]
):
module.fail_json(
msg="Cannot set access_control to smb or independent when SMB is not enabled."
)
if module.params["safeguard_acls"] and (
module.params["access_control"]
in ["mode-bits", "independent"]
or module.params["smb"]
):
module.fail_json(
msg="ACL Safeguarding cannot be enabled with SMB or if access_control is mode-bits or independent."
)
fs_obj = FileSystem(
name=module.params["name"],
provisioned=size,
fast_remove_directory_enabled=module.params[
"fastremove"
],
hard_limit_enabled=module.params["hard_limit"],
snapshot_directory_enabled=module.params["snapshot"],
nfs=NfsRule(
v3_enabled=module.params["nfsv3"],
v4_1_enabled=module.params["nfsv4"],
rules=module.params["nfs_rules"],
),
smb=SmbRule(enabled=module.params["smb"]),
http=ProtocolRule(enabled=module.params["http"]),
multi_protocol=MultiProtocolRule(
safeguard_acls=module.params["safeguard_acls"],
access_control_style=module.params[
"access_control"
],
),
default_user_quota=user_quota,
default_group_quota=group_quota,
)
else:
fs_obj = FileSystem(
name=module.params["name"],
provisioned=size,
fast_remove_directory_enabled=module.params[
"fastremove"
],
hard_limit_enabled=module.params["hard_limit"],
snapshot_directory_enabled=module.params["snapshot"],
nfs=NfsRule(
v3_enabled=module.params["nfsv3"],
v4_1_enabled=module.params["nfsv4"],
rules=module.params["nfs_rules"],
),
smb=SmbRule(
enabled=module.params["smb"],
acl_mode=module.params["smb_aclmode"],
),
http=ProtocolRule(enabled=module.params["http"]),
default_user_quota=user_quota,
default_group_quota=group_quota,
)
else:
fs_obj = FileSystem(
name=module.params["name"],
provisioned=size,
fast_remove_directory_enabled=module.params["fastremove"],
hard_limit_enabled=module.params["hard_limit"],
snapshot_directory_enabled=module.params["snapshot"],
nfs=NfsRule(
v3_enabled=module.params["nfsv3"],
v4_1_enabled=module.params["nfsv4"],
rules=module.params["nfs_rules"],
),
smb=ProtocolRule(enabled=module.params["smb"]),
http=ProtocolRule(enabled=module.params["http"]),
default_user_quota=user_quota,
default_group_quota=group_quota,
)
else:
fs_obj = FileSystem(
name=module.params["name"],
provisioned=size,
fast_remove_directory_enabled=module.params["fastremove"],
hard_limit_enabled=module.params["hard_limit"],
snapshot_directory_enabled=module.params["snapshot"],
nfs=NfsRule(
enabled=module.params["nfsv3"],
rules=module.params["nfs_rules"],
),
smb=ProtocolRule(enabled=module.params["smb"]),
http=ProtocolRule(enabled=module.params["http"]),
)
else:
fs_obj = FileSystem(
name=module.params["name"],
provisioned=size,
fast_remove_directory_enabled=module.params["fastremove"],
snapshot_directory_enabled=module.params["snapshot"],
nfs=NfsRule(
enabled=module.params["nfs"], rules=module.params["nfs_rules"]
),
smb=ProtocolRule(enabled=module.params["smb"]),
http=ProtocolRule(enabled=module.params["http"]),
)
blade.file_systems.create_file_systems(fs_obj)
except rest.ApiException as err:
message = json.loads(err.body)["errors"][0]["message"]
module.fail_json(
msg="Failed to create filesystem {0}. Error: {1}".format(
module.params["name"], message
)
)
if REPLICATION_API_VERSION in api_version:
if module.params["policy"]:
try:
blade.policies.list_policies(names=[module.params["policy"]])
except Exception:
_delete_fs(module, blade)
module.fail_json(
msg="Policy {0} doesn't exist.".format(module.params["policy"])
)
try:
blade.policies.create_policy_filesystems(
policy_names=[module.params["policy"]],
member_names=[module.params["name"]],
)
except Exception:
_delete_fs(module, blade)
module.fail_json(
msg="Failed to apply policy {0} when creating filesystem {1}.".format(
module.params["policy"], module.params["name"]
)
)
module.exit_json(changed=changed)
def modify_fs(module, blade):
"""Modify Filesystem"""
changed = False
mod_fs = False
attr = {}
if module.params["policy"] and module.params["policy_state"] == "present":
try:
policy = blade.policies.list_policy_filesystems(
policy_names=[module.params["policy"]],
member_names=[module.params["name"]],
)
except Exception:
module.fail_json(
msg="Policy {0} does not exist.".format(module.params["policy"])
)
if not policy.items:
try:
blade.policies.create_policy_filesystems(
policy_names=[module.params["policy"]],
member_names=[module.params["name"]],
)
mod_fs = True
except Exception:
module.fail_json(
msg="Failed to add filesystem {0} to policy {1}.".format(
module.params["name"], module.params["polict"]
)
)
if module.params["policy"] and module.params["policy_state"] == "absent":
try:
policy = blade.policies.list_policy_filesystems(
policy_names=[module.params["policy"]],
member_names=[module.params["name"]],
)
except Exception:
module.fail_json(
msg="Policy {0} does not exist.".format(module.params["policy"])
)
if len(policy.items) == 1:
try:
blade.policies.delete_policy_filesystems(
policy_names=[module.params["policy"]],
member_names=[module.params["name"]],
)
mod_fs = True
except Exception:
module.fail_json(
msg="Failed to remove filesystem {0} to policy {1}.".format(
module.params["name"], module.params["polict"]
)
)
if module.params["user_quota"]:
user_quota = human_to_bytes(module.params["user_quota"])
if module.params["group_quota"]:
group_quota = human_to_bytes(module.params["group_quota"])
fsys = get_fs(module, blade)
if fsys.destroyed:
attr["destroyed"] = False
mod_fs = True
if module.params["size"]:
if human_to_bytes(module.params["size"]) != fsys.provisioned:
attr["provisioned"] = human_to_bytes(module.params["size"])
mod_fs = True
api_version = blade.api_version.list_versions().versions
if NFSV4_API_VERSION in api_version:
v3_state = v4_state = None
if module.params["nfsv3"] and not fsys.nfs.v3_enabled:
v3_state = module.params["nfsv3"]
if not module.params["nfsv3"] and fsys.nfs.v3_enabled:
v3_state = module.params["nfsv3"]
if module.params["nfsv4"] and not fsys.nfs.v4_1_enabled:
v4_state = module.params["nfsv4"]
if not module.params["nfsv4"] and fsys.nfs.v4_1_enabled:
v4_state = module.params["nfsv4"]
if v3_state is not None or v4_state is not None:
attr["nfs"] = NfsRule(v4_1_enabled=v4_state, v3_enabled=v3_state)
mod_fs = True
        if (module.params["nfsv3"] or module.params["nfsv4"]) and (
            fsys.nfs.v3_enabled or fsys.nfs.v4_1_enabled
        ):
if module.params["nfs_rules"] is not None:
if fsys.nfs.rules != module.params["nfs_rules"]:
attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
mod_fs = True
if module.params["user_quota"] and user_quota != fsys.default_user_quota:
attr["default_user_quota"] = user_quota
mod_fs = True
if module.params["group_quota"] and group_quota != fsys.default_group_quota:
attr["default_group_quota"] = group_quota
mod_fs = True
else:
if module.params["nfsv3"] and not fsys.nfs.enabled:
attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
mod_fs = True
if not module.params["nfsv3"] and fsys.nfs.enabled:
attr["nfs"] = NfsRule(enabled=module.params["nfsv3"])
mod_fs = True
if module.params["nfsv3"] and fsys.nfs.enabled:
if fsys.nfs.rules != module.params["nfs_rules"]:
attr["nfs"] = NfsRule(rules=module.params["nfs_rules"])
mod_fs = True
if REPLICATION_API_VERSION in api_version:
if module.params["smb"] and not fsys.smb.enabled:
if MULTIPROTOCOL_API_VERSION in api_version:
attr["smb"] = SmbRule(enabled=module.params["smb"])
else:
attr["smb"] = SmbRule(
enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
)
mod_fs = True
if not module.params["smb"] and fsys.smb.enabled:
attr["smb"] = ProtocolRule(enabled=module.params["smb"])
mod_fs = True
if (
module.params["smb"]
and fsys.smb.enabled
and MULTIPROTOCOL_API_VERSION not in api_version
):
if fsys.smb.acl_mode != module.params["smb_aclmode"]:
attr["smb"] = SmbRule(
enabled=module.params["smb"], acl_mode=module.params["smb_aclmode"]
)
mod_fs = True
else:
if module.params["smb"] and not fsys.smb.enabled:
attr["smb"] = ProtocolRule(enabled=module.params["smb"])
mod_fs = True
if not module.params["smb"] and fsys.smb.enabled:
attr["smb"] = ProtocolRule(enabled=module.params["smb"])
mod_fs = True
if module.params["http"] and not fsys.http.enabled:
attr["http"] = ProtocolRule(enabled=module.params["http"])
mod_fs = True
if not module.params["http"] and fsys.http.enabled:
attr["http"] = ProtocolRule(enabled=module.params["http"])
mod_fs = True
if module.params["snapshot"] and not fsys.snapshot_directory_enabled:
attr["snapshot_directory_enabled"] = module.params["snapshot"]
mod_fs = True
if not module.params["snapshot"] and fsys.snapshot_directory_enabled:
attr["snapshot_directory_enabled"] = module.params["snapshot"]
mod_fs = True
if module.params["fastremove"] and not fsys.fast_remove_directory_enabled:
attr["fast_remove_directory_enabled"] = module.params["fastremove"]
mod_fs = True
if not module.params["fastremove"] and fsys.fast_remove_directory_enabled:
attr["fast_remove_directory_enabled"] = module.params["fastremove"]
mod_fs = True
if HARD_LIMIT_API_VERSION in api_version:
if not module.params["hard_limit"] and fsys.hard_limit_enabled:
attr["hard_limit_enabled"] = module.params["hard_limit"]
mod_fs = True
if module.params["hard_limit"] and not fsys.hard_limit_enabled:
attr["hard_limit_enabled"] = module.params["hard_limit"]
mod_fs = True
if MULTIPROTOCOL_API_VERSION in api_version:
if module.params["safeguard_acls"] and not fsys.multi_protocol.safeguard_acls:
attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=True)
mod_fs = True
if not module.params["safeguard_acls"] and fsys.multi_protocol.safeguard_acls:
attr["multi_protocol"] = MultiProtocolRule(safeguard_acls=False)
mod_fs = True
if module.params["access_control"] != fsys.multi_protocol.access_control_style:
attr["multi_protocol"] = MultiProtocolRule(
access_control_style=module.params["access_control"]
)
mod_fs = True
if REPLICATION_API_VERSION in api_version:
if module.params["writable"] is not None:
if not module.params["writable"] and fsys.writable:
attr["writable"] = module.params["writable"]
mod_fs = True
if (
module.params["writable"]
and not fsys.writable
and fsys.promotion_status == "promoted"
):
attr["writable"] = module.params["writable"]
mod_fs = True
if module.params["promote"] is not None:
if module.params["promote"] and fsys.promotion_status != "promoted":
attr["requested_promotion_state"] = "promoted"
mod_fs = True
if not module.params["promote"] and fsys.promotion_status == "promoted":
# Demotion only allowed on filesystems in a replica-link
try:
blade.file_system_replica_links.list_file_system_replica_links(
local_file_system_names=[module.params["name"]]
).items[0]
except Exception:
module.fail_json(
msg="Filesystem {0} not demoted. Not in a replica-link".format(
module.params["name"]
)
)
attr["requested_promotion_state"] = module.params["promote"]
mod_fs = True
if mod_fs:
changed = True
if not module.check_mode:
n_attr = FileSystem(**attr)
if REPLICATION_API_VERSION in api_version:
try:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=n_attr,
discard_non_snapshotted_data=module.params["discard_snaps"],
)
except rest.ApiException as err:
message = json.loads(err.body)["errors"][0]["message"]
module.fail_json(
msg="Failed to update filesystem {0}. Error {1}".format(
module.params["name"], message
)
)
else:
try:
blade.file_systems.update_file_systems(
name=module.params["name"], attributes=n_attr
)
except rest.ApiException as err:
message = json.loads(err.body)["errors"][0]["message"]
module.fail_json(
msg="Failed to update filesystem {0}. Error {1}".format(
module.params["name"], message
)
)
module.exit_json(changed=changed)
def _delete_fs(module, blade):
"""In module Delete Filesystem"""
api_version = blade.api_version.list_versions().versions
if NFSV4_API_VERSION in api_version:
if MULTIPROTOCOL_API_VERSION in api_version:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
multi_protocol=MultiProtocolRule(access_control_style="shared"),
destroyed=True,
),
)
else:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
)
else:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
)
blade.file_systems.delete_file_systems(module.params["name"])
def delete_fs(module, blade):
"""Delete Filesystem"""
changed = True
if not module.check_mode:
try:
api_version = blade.api_version.list_versions().versions
if REPLICATION_API_VERSION in api_version:
if NFSV4_API_VERSION in api_version:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
delete_link_on_eradication=module.params["delete_link"],
)
else:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
delete_link_on_eradication=module.params["delete_link"],
)
else:
if NFSV4_API_VERSION in api_version:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(v3_enabled=False, v4_1_enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
)
else:
blade.file_systems.update_file_systems(
name=module.params["name"],
attributes=FileSystem(
nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True,
),
)
if module.params["eradicate"]:
try:
blade.file_systems.delete_file_systems(name=module.params["name"])
except Exception:
module.fail_json(
msg="Failed to delete filesystem {0}.".format(
module.params["name"]
)
)
except Exception:
module.fail_json(
msg="Failed to update filesystem {0} prior to deletion.".format(
module.params["name"]
)
)
module.exit_json(changed=changed)
def eradicate_fs(module, blade):
"""Eradicate Filesystem"""
changed = True
if not module.check_mode:
try:
blade.file_systems.delete_file_systems(name=module.params["name"])
except Exception:
module.fail_json(
msg="Failed to eradicate filesystem {0}.".format(module.params["name"])
)
module.exit_json(changed=changed)
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(
dict(
name=dict(type="str", required=True),
eradicate=dict(default="false", type="bool"),
nfsv3=dict(default="true", type="bool"),
nfsv4=dict(default="true", type="bool"),
nfs_rules=dict(type="str"),
smb=dict(default="false", type="bool"),
http=dict(default="false", type="bool"),
snapshot=dict(default="false", type="bool"),
writable=dict(type="bool"),
promote=dict(type="bool"),
fastremove=dict(default="false", type="bool"),
hard_limit=dict(default="false", type="bool"),
user_quota=dict(type="str"),
policy=dict(type="str"),
group_quota=dict(type="str"),
smb_aclmode=dict(
type="str", default="shared", choices=["shared", "native"]
),
policy_state=dict(default="present", choices=["present", "absent"]),
state=dict(default="present", choices=["present", "absent"]),
delete_link=dict(default=False, type="bool"),
discard_snaps=dict(default=False, type="bool"),
safeguard_acls=dict(default=True, type="bool"),
access_control=dict(
type="str",
default="shared",
choices=["nfs", "smb", "shared", "independent", "mode-bits"],
),
size=dict(type="str"),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_JSON:
module.fail_json(msg="json sdk is required for this module")
if not HAS_PURITY_FB:
module.fail_json(msg="purity_fb sdk is required for this module")
state = module.params["state"]
blade = get_blade(module)
fsys = get_fs(module, blade)
if module.params["eradicate"] and state == "present":
module.warn("Eradicate flag ignored without state=absent")
if state == "present" and not fsys:
create_fs(module, blade)
elif state == "present" and fsys:
modify_fs(module, blade)
elif state == "absent" and fsys and not fsys.destroyed:
delete_fs(module, blade)
elif state == "absent" and fsys and fsys.destroyed and module.params["eradicate"]:
eradicate_fs(module, blade)
elif state == "absent" and not fsys:
module.exit_json(changed=False)
module.exit_json(changed=False)
if __name__ == "__main__":
main()
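# An additional illustrative example (not part of the documented EXAMPLES
# above) -- attaching an existing snapshot policy to a filesystem; the policy
# name below is hypothetical:
#
#   - name: Attach policy daily-snaps to filesystem foo
#     purefb_fs:
#       name: foo
#       policy: daily-snaps
#       policy_state: present
#       fb_url: 10.10.10.2
#       api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641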
| 39.306987 | 135 | 0.540609 |
3605c7a8b81cc348231862e5ad43a6a657a3fec1 | 7,232 | py | Python | tests/test_documents.py | bobweston/python-documentcloud | dd62f705645037391671b1092f829eb340c31580 | ["MIT"] | 2 | 2020-06-16T15:57:32.000Z | 2021-01-06T18:19:08.000Z | tests/test_documents.py | bobweston/python-documentcloud | dd62f705645037391671b1092f829eb340c31580 | ["MIT"] | null | null | null | tests/test_documents.py | bobweston/python-documentcloud | dd62f705645037391671b1092f829eb340c31580 | ["MIT"] | 4 | 2020-08-03T19:23:34.000Z | 2022-03-12T15:35:28.000Z |
# Future
from __future__ import division, print_function, unicode_literals
# Standard Library
from builtins import str
from datetime import datetime
# Third Party
import pytest
# DocumentCloud
from documentcloud.documents import Mention
from documentcloud.exceptions import APIError, DoesNotExistError
from documentcloud.organizations import Organization
from documentcloud.users import User
# pylint: disable=protected-access
class TestDocument:
def test_str(self, document):
assert str(document) == document.title
def test_dates(self, document):
for date_field in document.date_fields:
assert isinstance(getattr(document, date_field), datetime)
@pytest.mark.parametrize(
"attr",
[
"full_text_url",
"full_text",
"thumbnail_image_url",
"small_image",
"normal_image_url_list",
"large_image_url",
"page_text",
"json_text_url",
"pdf",
],
)
def test_getattr(self, document, attr):
assert getattr(document, attr)
@pytest.mark.parametrize(
"attr",
[
"get_full_text_url",
"get_full_text",
"get_thumbnail_image_url",
"get_small_image",
"get_normal_image_url_list",
"get_large_image_url",
"get_page_text",
"get_json_text_url",
"get_pdf",
],
)
def test_getattr_method(self, document, attr):
assert getattr(document, attr)()
@pytest.mark.parametrize(
"attr",
[
"full_text_url",
"get_full_text",
"thumbnail_image_url",
"get_small_image",
"normal_image_url_list",
"get_large_image_url",
],
)
def test_dir(self, document, attr):
assert attr in dir(document)
def test_mentions(self, client, document):
document = client.documents.search("document:{} text".format(document.id))[0]
assert document.mentions
mention = document.mentions[0]
assert mention.page
assert "<em>text</em>" in mention.text
def test_mentions_nosearch(self, document):
assert not document.mentions
def test_user(self, document):
assert document._user is None
assert isinstance(document.user, User)
assert document.user == document._user
def test_user_expanded(self, client, document):
document = client.documents.get(document.id, expand=["user"])
assert document._user is not None
assert document._user == document.user
def test_organization(self, document):
assert document._organization is None
assert isinstance(document.organization, Organization)
assert document.organization == document._organization
@pytest.mark.parametrize(
"attr",
[
"id",
"access",
"asset_url",
"canonical_url",
"created_at",
"data",
"description",
"edit_access",
"language",
"organization_id",
"page_count",
"page_spec",
"projects",
"related_article",
"published_url",
"slug",
"source",
"status",
"title",
"updated_at",
"user_id",
"pages",
"contributor",
"contributor_organization",
"contributor_organization_slug",
],
)
def test_attrs(self, document, attr):
assert getattr(document, attr)
def test_save(self, client, document):
assert document.source == "DocumentCloud"
document.source = "MuckRock"
document.save()
document = client.documents.get(document.id)
assert document.source == "MuckRock"
def test_delete(self, client, document_factory):
document = document_factory()
document.delete()
with pytest.raises(DoesNotExistError):
client.documents.get(document.id)
def test_section(self, document_factory):
document = document_factory()
assert len(document.sections) == 0
section = document.sections.create("Test Section", 0)
assert str(section) == "Test Section - p0"
assert section.page == 0
assert section == document.sections.list()[0]
class TestDocumentClient:
def test_search(self, client, document):
documents = client.documents.search("document:{} simple".format(document.id))
assert documents
def test_list(self, client):
# list and all are aliases
all_documents = client.documents.all()
my_documents = client.documents.list(user=client.user_id)
assert len(all_documents) > len(my_documents)
def test_upload_url(self, document_factory):
document = document_factory()
assert document.status == "success"
def test_public_upload(self, public_client):
with pytest.raises(APIError, match=r"403"):
public_client.documents.upload("tests/test.pdf")
def test_upload_file(self, document_factory):
pdf = open("tests/test.pdf", "rb")
document = document_factory(pdf)
assert document.status == "success"
def test_upload_file_path(self, document_factory):
document = document_factory("tests/test.pdf")
assert document.status == "success"
def test_upload_big_file(self, client, mocker):
mocker.patch("os.path.getsize", return_value=502 * 1024 * 1024)
with pytest.raises(ValueError):
client.documents.upload("tests/test.pdf")
def test_upload_dir(self, client):
documents = client.documents.upload_directory("tests/pdfs/")
assert len(documents) == 2
def test_format_upload_parameters(self, client):
with pytest.warns(UserWarning):
params = client.documents._format_upload_parameters(
"tests/test.pdf", access="private", secure=True, project=2, foo="bar"
)
assert params == {"title": "test", "access": "private", "projects": [2]}
def test_delete(self, document_factory, client):
document = document_factory()
client.documents.delete(document.id)
with pytest.raises(DoesNotExistError):
client.documents.get(document.id)
class TestMention:
def test_mention(self):
mention = Mention("page_no_42", "text")
assert str(mention) == '42 - "text"'
class TestSection:
def test_create_delete(self, document_factory):
document = document_factory()
assert len(document.sections) == 0
section = document.sections.create("Test Section", 0)
assert len(document.sections) == 1
# may not have two sections on the same page
with pytest.raises(APIError):
document.sections.create("Test Section 2", 0)
section.delete()
assert len(document.sections) == 0
def test_str(self, document):
assert str(document.sections[0])
def test_page(self, document):
assert document.sections[0].page == 0
| 30.905983 | 85 | 0.615459 |
72c7fa560dc28f997dff1d4d948ab220e0ec3113 | 13,109 | py | Python | tests/unit/models/test_model_interfaces.py | st--/trieste | 8c21681806b96912bd31929ab04d99ef0c6b48c9 | ["Apache-2.0"] | null | null | null | tests/unit/models/test_model_interfaces.py | st--/trieste | 8c21681806b96912bd31929ab04d99ef0c6b48c9 | ["Apache-2.0"] | null | null | null | tests/unit/models/test_model_interfaces.py | st--/trieste | 8c21681806b96912bd31929ab04d99ef0c6b48c9 | ["Apache-2.0"] | null | null | null |
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of trieste models against reference GPflow models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflow models are used as the underlying model in an trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflow models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
trieste model).
"""
from typing import Tuple, Callable, Union
import gpflow
from gpflow.models import GPModel, GPR, SGPR, VGP, SVGP
import pytest
import tensorflow as tf
import numpy as np
import numpy.testing as npt
from trieste.data import Dataset
from trieste.models.model_interfaces import (
CustomTrainable,
Batcher,
GaussianProcessRegression,
GPflowPredictor,
SparseVariational,
VariationalGaussianProcess,
)
from trieste.type import ObserverEvaluations, TensorType, QueryPoints
from tests.util.misc import random_seed
class _MinimalTrainable(CustomTrainable):
def loss(self) -> tf.Tensor:
raise NotImplementedError
def update(self, dataset: Dataset) -> None:
raise NotImplementedError
def predict(self, query_points: QueryPoints) -> Tuple[ObserverEvaluations, TensorType]:
raise NotImplementedError
def sample(self, query_points: QueryPoints, num_samples: int) -> ObserverEvaluations:
raise NotImplementedError
def test_trainable_model_interface_default_optimizer() -> None:
# gpflow.optimizers.Scipy.__init__ is that of object, so it's sufficient to test the type
assert isinstance(_MinimalTrainable().optimizer, gpflow.optimizers.Scipy)
def test_trainable_model_interface_set_optimizer() -> None:
model = _MinimalTrainable()
optimizer = tf.optimizers.Adam()
model.set_optimizer(optimizer)
assert model.optimizer is optimizer
def test_trainable_model_interface_default_optimizer_args() -> None:
assert _MinimalTrainable().optimizer_args == {}
def test_trainable_model_interface_set_optimizer_args() -> None:
model = _MinimalTrainable()
optimizer_args = {"a": 1, "b": 2}
model.set_optimizer_args(optimizer_args)
assert model.optimizer_args == optimizer_args
def test_trainable_model_interface_set_optimize() -> None:
class _OptimizeCallable:
call_count = 0
def __call__(self) -> None:
self.call_count += 1
optimize_callable = _OptimizeCallable()
model = _MinimalTrainable()
model.set_optimize(optimize_callable)
model.optimize()
assert optimize_callable.call_count == 1
def _mock_data() -> Tuple[tf.Tensor, tf.Tensor]:
return (
tf.constant([[1.1], [2.2], [3.3], [4.4]], gpflow.default_float()),
tf.constant([[1.2], [3.4], [5.6], [7.8]], gpflow.default_float()),
)
def _gpr(x: tf.Tensor, y: tf.Tensor) -> GPR:
return GPR((x, y), gpflow.kernels.Linear())
def _sgpr(x: tf.Tensor, y: tf.Tensor) -> SGPR:
return SGPR((x, y), gpflow.kernels.Linear(), x[: len(x) // 2])
def _svgp(inducing_variable: tf.Tensor) -> SVGP:
return SVGP(gpflow.kernels.Linear(), gpflow.likelihoods.Gaussian(), inducing_variable)
def _vgp(x: tf.Tensor, y: tf.Tensor) -> VGP:
likelihood = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Linear()
m = VGP((x, y), kernel, likelihood)
variational_variables = [m.q_mu.unconstrained_variable, m.q_sqrt.unconstrained_variable]
gpflow.optimizers.Scipy().minimize(m.training_loss_closure(), variational_variables)
return m
def _vgp_matern(x: tf.Tensor, y: tf.Tensor) -> VGP:
likelihood = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Matern32(lengthscales=0.2)
m = VGP((x, y), kernel, likelihood)
variational_variables = [m.q_mu.unconstrained_variable, m.q_sqrt.unconstrained_variable]
gpflow.optimizers.Scipy().minimize(m.training_loss_closure(), variational_variables)
return m
@pytest.fixture(
name="gpr_interface_factory",
params=[
(GaussianProcessRegression, _gpr),
(GaussianProcessRegression, _sgpr),
(VariationalGaussianProcess, _vgp),
],
)
def _(request) -> Callable[[tf.Tensor, tf.Tensor], GaussianProcessRegression]:
return lambda x, y: request.param[0](request.param[1](x, y))
def _reference_gpr(x: tf.Tensor, y: tf.Tensor) -> gpflow.models.GPR:
return _gpr(x, y)
def _3x_plus_10(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + 10
def _2sin_x_over_3(x: tf.Tensor) -> tf.Tensor:
return 2.0 * tf.math.sin(x/3.)
def test_gaussian_process_regression_loss(gpr_interface_factory) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = gpr_interface_factory(x, _3x_plus_10(x))
reference_model = _reference_gpr(x, _3x_plus_10(x))
npt.assert_allclose(model.loss(), -reference_model.log_marginal_likelihood(), rtol=1e-6)
def test_gaussian_process_regression_update(gpr_interface_factory) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = gpr_interface_factory(x, _3x_plus_10(x))
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
model.update(Dataset(x_new, _3x_plus_10(x_new)))
model.optimize()
reference_model = _reference_gpr(x_new, _3x_plus_10(x_new))
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(), reference_model.trainable_variables
)
npt.assert_allclose(model.loss(), reference_model.training_loss(), rtol=1e-6)
def test_vgp_update_updates_num_data() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = _3x_plus_10(x)
m = VariationalGaussianProcess(_vgp(x, y))
num_data = m.model.num_data
x_new = tf.concat([x, [[10.0], [11.0]]], 0)
y_new = _3x_plus_10(x_new)
m.update(Dataset(x_new, y_new))
new_num_data = m.model.num_data
assert new_num_data - num_data == 2
@random_seed
def test_vgp_update_q_mu_sqrt_unchanged() -> None:
x_observed = tf.constant(np.arange(10).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _2sin_x_over_3(x_observed)
model = VariationalGaussianProcess(_vgp_matern(x_observed, y_observed))
old_q_mu = model.model.q_mu.numpy()
old_q_sqrt = model.model.q_sqrt.numpy()
data = Dataset(x_observed, y_observed)
model.update(data)
new_q_mu = model.model.q_mu.numpy()
new_q_sqrt = model.model.q_sqrt.numpy()
npt.assert_allclose(old_q_mu, new_q_mu, atol=1e-5)
npt.assert_allclose(old_q_sqrt, new_q_sqrt, atol=1e-5)
@random_seed
def test_gaussian_process_regression_default_optimize(gpr_interface_factory) -> None:
model = gpr_interface_factory(*_mock_data())
loss = model.loss()
model.optimize()
assert model.loss() < loss
@random_seed
@pytest.mark.parametrize("optimizer", [gpflow.optimizers.Scipy(), tf.optimizers.Adam(), None])
def test_gaussian_process_regression_optimize(
optimizer: Union[gpflow.optimizers.Scipy, tf.optimizers.Optimizer, None], gpr_interface_factory
) -> None:
model = gpr_interface_factory(*_mock_data())
model.set_optimizer(optimizer)
model.set_optimize()
loss = model.loss()
model.optimize()
assert model.loss() < loss
def _3x_plus_gaussian_noise(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + np.random.normal(scale=0.01, size=x.shape)
@random_seed
def test_variational_gaussian_process_predict() -> None:
x_observed = tf.constant(np.arange(100).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
model = VariationalGaussianProcess(_vgp(x_observed, y_observed))
gpflow.optimizers.Scipy().minimize(
model.loss, model.trainable_variables,
)
x_predict = tf.constant([[50.5]], gpflow.default_float())
mean, variance = model.predict(x_predict)
reference_model = _reference_gpr(x_observed, y_observed)
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(), reference_model.trainable_variables,
)
reference_mean, reference_variance = reference_model.predict_f(x_predict)
npt.assert_allclose(mean, reference_mean)
npt.assert_allclose(variance, reference_variance, atol=1e-3)
class _QuadraticPredictor(GPflowPredictor):
@property
def model(self) -> GPModel:
return _QuadraticGPModel()
class _QuadraticGPModel(GPModel):
def __init__(self):
super().__init__(
gpflow.kernels.Polynomial(2), # not actually used
gpflow.likelihoods.Gaussian(), num_latent_gps=1
)
def predict_f(
self, Xnew: tf.Tensor, full_cov: bool = False, full_output_cov: bool = False
) -> Tuple[tf.Tensor, tf.Tensor]:
assert not full_output_cov, "Test utility not implemented for full output covariance"
mean = tf.reduce_sum(Xnew ** 2, axis=1, keepdims=True)
*leading, x_samples, y_dims = mean.shape
var_shape = [*leading, y_dims, x_samples, x_samples] if full_cov else mean.shape
return mean, tf.ones(var_shape, dtype=mean.dtype)
def maximum_log_likelihood_objective(self, *args, **kwargs) -> tf.Tensor:
raise NotImplementedError
def test_gpflow_predictor_predict() -> None:
model = _QuadraticPredictor()
mean, variance = model.predict(tf.constant([[2.5]], gpflow.default_float()))
assert mean.shape == [1, 1]
assert variance.shape == [1, 1]
npt.assert_allclose(mean, [[6.25]], rtol=0.01)
npt.assert_allclose(variance, [[1.0]], rtol=0.01)
@random_seed
def test_gpflow_predictor_sample() -> None:
model = _QuadraticPredictor()
num_samples = 20_000
samples = model.sample(tf.constant([[2.5]], gpflow.default_float()), num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=2 * linear_error)
def test_gpflow_predictor_sample_no_samples() -> None:
samples = _QuadraticPredictor().sample(tf.constant([[50.]], gpflow.default_float()), 0)
assert samples.shape == (0, 1, 1)
def test_sparse_variational_model_attribute() -> None:
model = _svgp(_mock_data()[0])
sv = SparseVariational(model, Dataset(*_mock_data()), tf.optimizers.Adam(), iterations=10)
assert sv.model is model
@pytest.mark.parametrize('new_data', [
Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))
])
def test_sparse_variational_update_raises_for_invalid_shapes(new_data: Dataset) -> None:
model = SparseVariational(
_svgp(tf.zeros([1, 4])),
Dataset(tf.zeros([3, 4]), tf.zeros([3, 1])),
tf.optimizers.Adam(),
iterations=10
)
with pytest.raises(ValueError):
model.update(new_data)
def test_sparse_variational_optimize_with_defaults() -> None:
x_observed = tf.constant(np.arange(100).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
model = SparseVariational(
_svgp(x_observed[:10]),
Dataset(tf.constant(x_observed), tf.constant(y_observed)),
tf.optimizers.Adam(),
iterations=20
)
loss = model.model.training_loss((x_observed, y_observed))
model.optimize()
assert model.model.training_loss((x_observed, y_observed)) < loss
@pytest.mark.parametrize('apply_jit', [True, False])
@pytest.mark.parametrize('batcher', [
lambda ds: tf.data.Dataset.from_tensors((ds.query_points, ds.observations)).shuffle(100).batch(10),
lambda ds: [(ds.query_points, ds.observations)]
])
def test_sparse_variational_optimize(batcher: Batcher, apply_jit: bool) -> None:
x_observed = tf.constant(np.arange(100).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
model = SparseVariational(
_svgp(x_observed[:10]),
Dataset(tf.constant(x_observed), tf.constant(y_observed)),
tf.optimizers.Adam(),
iterations=20,
batcher=batcher,
apply_jit=apply_jit
)
loss = model.model.training_loss((x_observed, y_observed))
model.optimize()
assert model.model.training_loss((x_observed, y_observed)) < loss
| 35.525745 | 103 | 0.714776 |
04f0edf35a3a39e7aef8d93341690f4bd4e4a709 | 2,839 | py | Python | calendarapi.py | AsoTora/GoogleCalendarAPI | 72ada9fc29269a55c195000bde22a2d1050de4d5 | ["MIT"] | null | null | null | calendarapi.py | AsoTora/GoogleCalendarAPI | 72ada9fc29269a55c195000bde22a2d1050de4d5 | ["MIT"] | null | null | null | calendarapi.py | AsoTora/GoogleCalendarAPI | 72ada9fc29269a55c195000bde22a2d1050de4d5 | ["MIT"] | null | null | null |
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
def auth():
"""Get authentication token fot the module to work
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('calendar', 'v3', credentials=creds)
def get_calendar_id(name):
"""
    Loop over the calendars in the user's calendarList and return the id
    of the calendar with the given name.
"""
service = auth()
page_token = None
while True:
calendar_list = service.calendarList().list(
pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
if calendar_list_entry['summary'] == name:
calendar_id = calendar_list_entry['id']
return calendar_id
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
    raise LookupError('No calendar entry found with name "{}"'.format(name))
def create_event(data):
""" Create Google Calendar Event with given Information
data format: {shift': '', 'start_date': '', 'start_time': '',
'end_time': '', 'end_date': ''}
"""
service = auth()
work_id = get_calendar_id('Work')
event = {
'summary': "".join(data['shift']),
'location': 'Prospekt Dzerzhinskogo 104, Minsk 220089',
'start': {
'dateTime': '{}T{}'.format(
data['start_date'], data['start_time']),
'timeZone': 'Europe/Minsk',
},
'end': {
'dateTime': '{}T{}'.format(
data['end_date'], data['end_time']),
'timeZone': 'Europe/Minsk',
},
'reminders': {
'useDefault': True,
},
}
event = service.events().insert(calendarId=work_id, body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
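# Example usage (illustrative only -- the dates/times are made up and a
# calendar named 'Work' must already exist in the authenticated account):
#
#   if __name__ == '__main__':
#       create_event({
#           'shift': 'Night shift',
#           'start_date': '2020-05-01', 'start_time': '20:00:00',
#           'end_date': '2020-05-02', 'end_time': '08:00:00',
#       })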
| 33.4 | 79 | 0.613244 |
e7189fce5c973a301ef5503ac1bf7607898c2691 | 22,831 | py | Python | test/functional/test_framework/test_framework.py | Mirkolance1/altcoin | d77e25be0ce015ff03c3df79801ac208e68be963 | ["MIT"] | null | null | null | test/functional/test_framework/test_framework.py | Mirkolance1/altcoin | d77e25be0ce015ff03c3df79801ac208e68be963 | ["MIT"] | null | null | null | test/functional/test_framework/test_framework.py | Mirkolance1/altcoin | d77e25be0ce015ff03c3df79801ac208e68be963 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
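# A minimal (hypothetical) subclass satisfying the contract enforced above
# would look roughly like this -- it overrides only set_test_params() and
# run_test(), and leaves __init__()/main() alone:
#
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 2
#
#       def run_test(self):
#           self.log.info("Tip: %s", self.nodes[0].getbestblockhash())
#
#   if __name__ == '__main__':
#       ExampleTest().main()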
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a altcoin test script.
Individual altcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.mocktime = 0
self.rpc_timewait = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave altcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop altcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use altcoin-cli instead of RPC for all commands")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.options.bitcoind = os.getenv("ALTCOIND", default=config["environment"]["BUILDDIR"] + '/src/altcoind' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("LITECOINCLI", default=config["environment"]["BUILDDIR"] + '/src/altcoin-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.import_deterministic_coinbase_privkeys()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: altcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def import_deterministic_coinbase_privkeys(self):
if self.setup_clean_chain:
return
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(n.get_deterministic_priv_key()[1])
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects"""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), rpchost=rpchost, timewait=self.rpc_timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start an altcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple altcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i, expected_stderr=''):
        """Stop an altcoind test node"""
self.nodes[i].stop_node(expected_stderr)
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple altcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
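        # Worked example of the constant above: 1388534400 is the Unix timestamp for
        # 2014-01-01 00:00:00 UTC, and 201 blocks * 10 minutes * 60 seconds adds
        # 120600 seconds, so self.mocktime ends up as 1388655000 (2014-01-02 09:30 UTC).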
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], rpchost=None, timewait=self.rpc_timewait, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generatetoaddress(1, self.nodes[peer].get_deterministic_priv_key()[0])
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_UTILS")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
| 42.046041
| 331
| 0.633306
|
ca5ca7c23b016659991347bdf539b0117aa78b2e
| 2,086
|
py
|
Python
|
round 1 A/square.py
|
rocket3989/Code-Jam-2020
|
cb9f91af25581cd68c5ee89f1ce88200c574f25c
|
[
"MIT"
] | 1
|
2020-04-05T02:10:44.000Z
|
2020-04-05T02:10:44.000Z
|
round 1 A/square.py
|
rocket3989/Code-Jam-2020
|
cb9f91af25581cd68c5ee89f1ce88200c574f25c
|
[
"MIT"
] | null | null | null |
round 1 A/square.py
|
rocket3989/Code-Jam-2020
|
cb9f91af25581cd68c5ee89f1ce88200c574f25c
|
[
"MIT"
] | null | null | null |
for tc in range(int(input())):
R, C = [int(x) for x in input().split()]
floor = []
for r in range(R):
floor.append([int(x) for x in input().split()])
score = 0
while True:
neighborSum = [[0 for i in range(C)] for j in range(R)]
count = [[0 for i in range(C)] for j in range(R)]
score += sum(sum(row) for row in floor)
for r in range(R):
lastSeen = -1
for c in range(C):
if lastSeen != -1:
neighborSum[r][c] += lastSeen
count[r][c] += 1
if floor[r][c] != 0:
lastSeen = floor[r][c]
for c in range(C):
lastSeen = -1
for r in range(R):
if lastSeen != -1:
neighborSum[r][c] += lastSeen
count[r][c] += 1
if floor[r][c] != 0:
lastSeen = floor[r][c]
for r in range(R):
lastSeen = -1
for c in range(C - 1, - 1, -1):
if lastSeen != -1:
neighborSum[r][c] += lastSeen
count[r][c] += 1
if floor[r][c] != 0:
lastSeen = floor[r][c]
for c in range(C):
lastSeen = -1
for r in range(R - 1, -1, -1):
if lastSeen != -1:
neighborSum[r][c] += lastSeen
count[r][c] += 1
if floor[r][c] != 0:
lastSeen = floor[r][c]
elim = 0
for r in range(R):
for c in range(C):
if floor[r][c] == 0: continue
if count[r][c] != 0:
if floor[r][c] < neighborSum[r][c] / count[r][c]:
floor[r][c] = 0
elim += 1
if elim == 0:
break
print("Case #{}: {}".format(tc + 1, score))
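# Illustrative walk-through on a hypothetical 1x3 floor [3, 1, 2]: round 1 adds
# 3 + 1 + 2 = 6 to the score and eliminates the middle dancer (1 < (3 + 2) / 2);
# round 2 adds 3 + 2 = 5 and eliminates the dancer with skill 2, whose only
# remaining visible neighbour is 3; round 3 adds 3 and removes nobody, so the
# printed interest is 6 + 5 + 3 = 14.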
| 32.59375
| 70
| 0.349952
|
72b6bcc8f8cc3199f8abc644ec0a2210a83e290f
| 20,955
|
py
|
Python
|
languageModels.py
|
isibord/LanguageModels
|
fe6a478e01cc7a5682a38620bcc052020ed5d30f
|
[
"Apache-2.0"
] | 1
|
2019-06-04T19:45:44.000Z
|
2019-06-04T19:45:44.000Z
|
languageModels.py
|
isibord/LanguageModels
|
fe6a478e01cc7a5682a38620bcc052020ed5d30f
|
[
"Apache-2.0"
] | null | null | null |
languageModels.py
|
isibord/LanguageModels
|
fe6a478e01cc7a5682a38620bcc052020ed5d30f
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from math import log, pow
import argparse
"""
Given a path to the corpus file, process it with or without unk replacement so the models can access it
"""
class ProcessCorpus:
corpusArray = []
wordCountList = {}
totalNumWords = 0
unk = '<unk>'
startSymbol = '<s>'
stopSymbol = '</s>'
"""
    Initialize class objects and process the corpus file
"""
def __init__(self, corpusPath, unk=False):
self.corpusPath = corpusPath
self.wordCountList = defaultdict(lambda: 0)
self.totalNumWords = 0
if unk:
self.process_with_unk()
else:
self.process_default()
"""
Process the corpus file, with unk symbols for rare words (count == 1)
"""
def process_with_unk(self):
f = open(self.corpusPath)
self.corpusArray = []
self.totalNumWords = 0
self.wordCountList = defaultdict(lambda: 0)
wordCountListHelper = defaultdict(lambda: 0)
for sentence in f:
words = sentence.split()
for word in words:
wordCountListHelper[word] = wordCountListHelper[word] + 1
self.totalNumWords += 1
        rarewords = {key for key, count in wordCountListHelper.items() if count == 1}
f.seek(0)
for sentence in f:
words = sentence.split()
newsentence = []
for word in words:
if word in rarewords:
newsentence.append(self.unk)
self.wordCountList[self.unk] = self.wordCountList[self.unk] + 1
else:
newsentence.append(word)
self.wordCountList[word] = self.wordCountList[word] + 1
self.corpusArray.append(newsentence)
"""
Process the corpus file as is
"""
def process_default(self):
f = open(self.corpusPath)
self.corpusArray = []
self.totalNumWords = 0
self.wordCountList = defaultdict(lambda: 0)
for sentence in f:
sentence = sentence.strip()
words = sentence.split()
self.corpusArray.append(words)
for word in words:
self.wordCountList[word] = self.wordCountList[word] + 1
self.totalNumWords += 1
"""
This class contains the unigram model for the subsequent interpolation
"""
class UnigramModel:
"""
Unigram model already gets the information it needs (word count) from the corpus data
which contains the ProcessCorpus object
"""
def __init__(self, corpusData):
self.trainingCorpus = corpusData
"""
    Scores the probability of a given sentence / word (if the sentence contains only one word)
"""
def score_probability_of_sentence(self, sentence):
score = 0.0
for word in sentence:
wordcount = self.trainingCorpus.wordCountList[word]
# log(a/b) = log(a) - log(b)
if wordcount > 0:
score += (log(wordcount, 2) - log(self.trainingCorpus.totalNumWords, 2))
else:
wordcount = self.trainingCorpus.wordCountList[ProcessCorpus.unk]
score += (log(wordcount, 2) - log(self.trainingCorpus.totalNumWords, 2))
return score
"""
This class represents the bigram model which would later be used for interpolation
"""
class BigramModel:
def __init__(self, corpusData):
self.trainingCorpus = corpusData
self.bigramCountList = defaultdict(lambda: 0)
self.train_bigram_model()
"""
Splits corpus into bigrams for training the model as well as storing counts to be used later
"""
def train_bigram_model(self):
for sentence in self.trainingCorpus.corpusArray:
unigram1 = ProcessCorpus.startSymbol
self.trainingCorpus.wordCountList[unigram1] = self.trainingCorpus.wordCountList[unigram1] + 1
unigram2 = ''
for word in sentence:
unigram2 = word
self.bigramCountList[(unigram1,unigram2)] = self.bigramCountList[(unigram1,unigram2)] + 1
unigram1 = word
unigram2 = ProcessCorpus.stopSymbol
self.bigramCountList[(unigram1, unigram2)] = self.bigramCountList[(unigram1, unigram2)] + 1
"""
Scores the log probability of a given sentence using the information computed in the train function
Using Laplace smoothing to prevent undefined probability in zero history situations
"""
def score_probability_of_sentence(self, sentence):
score = 0.0
unigram1 = ProcessCorpus.startSymbol
unigram2 = ''
for word in sentence:
unigram2 = word
bigramFrequency = self.bigramCountList[(unigram1, unigram2)]
#Used laplace smoothing #NOTE
score += (log(bigramFrequency + 1, 2) - (log(self.trainingCorpus.wordCountList[unigram1] + len(self.trainingCorpus.wordCountList), 2)))
unigram1 = word
unigram2 = ProcessCorpus.stopSymbol
bigramFrequency = self.bigramCountList[(unigram1, unigram2)]
score += (log(bigramFrequency + 1, 2) - (log(self.trainingCorpus.wordCountList[unigram1] + len(self.trainingCorpus.wordCountList), 2)))
return score
"""
Score the MLE probability of a bigram from this model's trained data
"""
def score_mle_probability(self, bigram):
score = 0.0
unigram1, unigram2 = bigram
bigramFrequency = self.bigramCountList[bigram]
# Used laplace smoothing here #NOTE
score += (log(bigramFrequency + 1, 2) - (log(self.trainingCorpus.wordCountList[unigram1] + len(self.trainingCorpus.wordCountList), 2)))
return score
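# Worked example of the add-one estimate above (hypothetical counts): if the
# bigram ("the", "cat") was seen 3 times, "the" was seen 20 times and the
# vocabulary holds 1000 word types, the smoothed log2-probability is
# log((3 + 1) / (20 + 1000), 2) = log(4 / 1020, 2), roughly -7.99.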
"""
This class represents the trigram model which would later be used for interpolation
"""
class TrigramModel:
def __init__(self, corpusData, relatedBigram, delta=0):
self.trainingCorpus = corpusData
self.trigramCountList = defaultdict(lambda: 0)
self.delta = delta
self.relatedBigram = relatedBigram
self.train_trigram_model()
"""
    Splits the corpus into trigrams for training the model as well as storing counts to be used later
"""
def train_trigram_model(self):
for sentence in self.trainingCorpus.corpusArray:
unigram1 = ProcessCorpus.startSymbol
unigram2 = ProcessCorpus.startSymbol
unigram3 = ''
self.relatedBigram.bigramCountList[(unigram1, unigram2)] = self.relatedBigram.bigramCountList[(unigram1, unigram2)] + 1
for word in sentence:
unigram3 = word
self.trigramCountList[(unigram1, unigram2, unigram3)] = self.trigramCountList[(unigram1, unigram2, unigram3)] + 1
unigram1 = unigram2
unigram2 = word
unigram3 = ProcessCorpus.stopSymbol
self.trigramCountList[(unigram1, unigram2, unigram3)] = self.trigramCountList[(unigram1, unigram2, unigram3)] + 1
"""
Scores the log probability of a given sentence using the information computed in the train function
Using Laplace smoothing to prevent undefined probability in zero history situations
"""
def score_probability_of_sentence(self, sentence):
score = 0.0
unigram1 = ProcessCorpus.startSymbol
unigram2 = ProcessCorpus.startSymbol
unigram3 = ''
for word in sentence:
unigram3 = word
trigramFrequency = self.trigramCountList[(unigram1, unigram2, unigram3)]
bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)]
#Used laplace smoothing #NOTE
score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2)))
unigram1 = unigram2
unigram2 = word
unigram3 = ProcessCorpus.stopSymbol
trigramFrequency = self.trigramCountList[(unigram1, unigram2, unigram3)]
bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)]
score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2)))
return score
"""
Scores the MLE probability of a given trigram from the trained data
"""
def score_mle_probability(self, trigram):
score = 0.0
unigram1, unigram2, unigram3 = trigram
trigramFrequency = self.trigramCountList[trigram]
bigramFrequency = self.relatedBigram.bigramCountList[(unigram1, unigram2)]
#Used laplace smoothing here #NOTE
score += (log((trigramFrequency + 1) - self.delta, 2) - (log(bigramFrequency + len(self.trainingCorpus.wordCountList), 2)))
return score
"""
This model interpolates a unigram, bigram and trigram model with some hyperparameters as weights for each model
"""
class InterpolationModel:
"""
    Initialize the individual models; each is trained as part of initialization
"""
def __init__(self, corpusData, uniweight=0.2, biweight=0.3, triweight=0.5):
self.uniweight = uniweight
self.biweight = biweight
self.triweight = triweight
self.trainingData = corpusData
self.unigramModel = UnigramModel(corpusData)
self.bigramModel = BigramModel(corpusData)
self.trigramModel = TrigramModel(corpusData, self.bigramModel, 0)
"""
Score a sentence with the interpolation of the three models and weights
"""
def score_sentence(self, sentence):
score = 0.0
score += self.uniweight * self.unigramModel.score_probability_of_sentence(sentence)
score += self.biweight * self.bigramModel.score_probability_of_sentence(sentence)
score += self.triweight * self.trigramModel.score_probability_of_sentence(sentence)
return score
"""
Calculate perplexity of a corpus for this model
"""
def calculate_perplexity(self, corpus):
logSum = 0.0
numWordsInCorpus = 0
perplexity = 0.0
for sentence in corpus.corpusArray:
numWordsInCorpus += len(sentence)
logSum += (-1 * self.score_sentence(sentence))
perplexity = logSum / numWordsInCorpus
perplexity = pow(2, perplexity)
return perplexity
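# The loop above computes perplexity as 2 raised to the average negative
# log2-probability per word, i.e. PP = 2 ** ((-1 / N) * sum(log2 P(sentence))).
# Rough numeric sketch (hypothetical values): a 10-word test corpus whose
# sentences score a combined log2-probability of -80 gives PP = 2 ** (80 / 10) = 256.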
"""
This model creates the backoff model implementation for the proposed modification to Assignment 1
"""
class BackoffModel:
"""
Initializes with raw corpus data or an existing set of trained unigram, bigram and trigram models
"""
def __init__(self, corpusData, delta, unigramModel = None, bigramModel = None, trigramModel = None):
self.trainingCorpus = corpusData
if unigramModel is not None:
self.unigramModel = unigramModel
else:
self.unigramModel = UnigramModel(corpusData)
if bigramModel is not None:
self.bigramModel = bigramModel
else:
self.bigramModel = BigramModel(corpusData)
if trigramModel is not None:
self.trigramModel = trigramModel
self.trigramModel.delta = delta
else:
self.trigramModel = TrigramModel(corpusData, self.bigramModel, delta)
self.historyList = defaultdict() #for each history contains B(w_i-2, w_i-1) and #B(w_i-1)
self.B1List = defaultdict() #for each bigram contains B(w_i-1)
self.train_model()
"""
    Train the model by computing the additional data needed to handle the 'missing mass' value
"""
def train_model(self):
for history, count in self.bigramModel.bigramCountList.items():
newHistory = BackoffData(history)
unigram1, unigram2 = history
newB1History = BackoffData(unigram2)
totalprob = 0.0
for word, freq in self.trainingCorpus.wordCountList.items():
if self.trigramModel.trigramCountList.get((unigram1, unigram2, word), 0) > 0:
totalprob += self.trigramModel.score_mle_probability((unigram1, unigram2, word))
else:
newHistory.B2.append(word)
if self.bigramModel.bigramCountList.get((unigram2, word), 0) == 0:
newHistory.B1.append(word)
newB1History.B1.append(word)
newHistory.q = 1 - (pow(2, totalprob))
newB1History.q = newHistory.q
self.historyList[history] = newHistory
self.B1List[unigram2] = newB1History
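    # Informally, newHistory.q above is the 'missing mass' referred to in the
    # docstrings: an estimate of the probability left over for words never observed
    # after the history (w_i-2, w_i-1), which score_sentence later redistributes
    # over the backed-off bigram and unigram estimates.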
"""
Score a sentence in this model
"""
def score_sentence(self, sentence):
score = 0.0
unigram1 = ProcessCorpus.startSymbol
unigram2 = ProcessCorpus.startSymbol
unigram3 = ''
for word in sentence:
unigram3 = word
trigram = (unigram1, unigram2, unigram3)
bigram = (unigram2, unigram3)
if self.trigramModel.trigramCountList[trigram] > 0:
#p1 case
score += self.trigramModel.score_mle_probability(trigram)
elif (self.trigramModel.trigramCountList[trigram] == 0) and (self.bigramModel.bigramCountList[bigram] > 0):
#p2 case
numerator = self.bigramModel.score_mle_probability(bigram)
denominator = 0.0
historyitem = [historydata for historykey, historydata in self.historyList.items() if historykey == bigram]
historyitem = historyitem[0]
for b2item in historyitem.B2:
denominator += self.bigramModel.score_mle_probability((unigram2, b2item))
score += (numerator/denominator) * 0.5 * historyitem.q
elif self.bigramModel.bigramCountList[bigram] == 0:
#p3 case
numerator = self.unigramModel.score_probability_of_sentence([unigram3])
denominator = 0.0
historyitem = [historydata for historykey, historydata in self.historyList.items() if historykey == bigram]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
else:
historyitem = [historydata for historykey, historydata in self.B1List.items() if historykey == unigram2]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
else:
# unknown word for unigram calculation
historyitem = [historydata for historykey, historydata in self.B1List.items() if historykey == ProcessCorpus.unk]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
unigram1 = unigram2
unigram2 = word
unigram3 = ProcessCorpus.stopSymbol
trigram = (unigram1, unigram2, unigram3)
bigram = (unigram2, unigram3)
if self.trigramModel.trigramCountList[trigram] > 0:
# p1 case
score += self.trigramModel.score_mle_probability(trigram)
elif (self.trigramModel.trigramCountList[trigram] == 0) and (self.bigramModel.bigramCountList[bigram] > 0):
# p2 case
numerator = self.bigramModel.score_mle_probability(bigram)
denominator = 0.0
historyitem = [historydata for historykey, historydata in self.historyList.items() if historykey == bigram]
historyitem = historyitem[0]
for b2item in historyitem.B2:
denominator += self.bigramModel.score_mle_probability((unigram2, b2item))
score += (numerator / denominator) * 0.5 * historyitem.q
elif self.bigramModel.bigramCountList[bigram] == 0:
# p3 case
numerator = self.unigramModel.score_probability_of_sentence([unigram3])
denominator = 0.0
historyitem = [historydata for historykey, historydata in self.historyList.items() if historykey == bigram]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
else:
historyitem = [historydata for historykey, historydata in self.B1List.items() if historykey == unigram2]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
else:
#unknown word for unigram calculation
historyitem = [historydata for historykey, historydata in self.B1List.items() if historykey == ProcessCorpus.unk]
if len(historyitem) > 0:
historyitem = historyitem[0]
for b1item in historyitem.B1:
denominator += self.unigramModel.score_probability_of_sentence([b1item])
score += (numerator / denominator) * 0.5 * historyitem.q
return score
"""
Calculate perplexity of a corpus for this model
"""
def calculate_perplexity(self, corpus):
logSum = 0.0
numWordsInCorpus = 0
perplexity = 0.0
for sentence in corpus.corpusArray:
numWordsInCorpus += len(sentence)
logSum += (-1 * self.score_sentence(sentence))
perplexity = logSum / numWordsInCorpus
perplexity = pow(2, perplexity)
return perplexity
"""
This holds the data for each history in the backoff model
"""
class BackoffData:
def __init__(self, history):
self.history = history #history
self.q = 0.0 #missing mass
self.B2 = [] #B(w_i-2, w_i-1)
self.B1 = [] #B(w_i-1)
#TODO:store this data into file once computed
"""
Parse command line arguments: training file path and test file path for a model
"""
def parse_args():
argParser = argparse.ArgumentParser(description='Parse settings to run models')
argParser.add_argument('filepathtrain', help='Path to file to train model')
argParser.add_argument('filepathtest', help='Path to file to test model')
options = argParser.parse_args()
return options
"""
Execute an instance of interpolation and backoff model for a given train and test corpus
"""
def main():
args = parse_args()
trainDataPath = args.filepathtrain
testDataPath = args.filepathtest
corpusData = ProcessCorpus(trainDataPath, True)
interpolationModel = InterpolationModel(corpusData, 0.05, 0.15, 0.8)
testCorpus = ProcessCorpus(testDataPath, False)
interpolation_perplexity = interpolationModel.calculate_perplexity(testCorpus)
backoffModel = BackoffModel(corpusData, 0.5, interpolationModel.unigramModel, interpolationModel.bigramModel, interpolationModel.trigramModel)
backoff_perplexity = backoffModel.calculate_perplexity(testCorpus)
with open("outputData.txt", 'w') as f:
f.write("Trainfile: " + trainDataPath + '\r\n')
f.write("Testpath: " + testDataPath + '\r\n')
f.write("Interpolation Perplexity: " + str(interpolation_perplexity) + '\r\n')
f.write("Backoff Perplexity: " + str(backoff_perplexity) + '\r\n')
f.write("Interpolation hyperparameters: 0.05, 0.15, 0.8" + '\r\n')
f.write("Backoff delta hyperparameters: 0.5" + '\r\n')
if __name__ == '__main__':
main()
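# Minimal usage sketch (assuming plain-text corpora named train.txt and test.txt
# exist next to this script):
#   python languageModels.py train.txt test.txt
# The interpolation and backoff perplexities are then written to outputData.txt.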
| 43.74739
| 148
| 0.608542
|
868f9d7f0b008e42afa9c22df37d630cf9a14177
| 2,690
|
py
|
Python
|
test/SConsignFile/default.py
|
acmorrow/scons
|
0d9a99d630169277689e95e39ae64ccf9b9215bf
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/SConsignFile/default.py
|
acmorrow/scons
|
0d9a99d630169277689e95e39ae64ccf9b9215bf
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/SConsignFile/default.py
|
acmorrow/scons
|
0d9a99d630169277689e95e39ae64ccf9b9215bf
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Verify the default behavior of SConsignFile() called with no arguments."""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('subdir')
test.write('build.py', r"""
import sys
with open(sys.argv[1], 'wb') as ofp, open(sys.argv[2], 'rb') as ifp:
ofp.write(ifp.read())
sys.exit(0)
""")
#
test.write('SConstruct', """
SConsignFile()
DefaultEnvironment(tools=[])
B = Builder(action=r'%(_python_)s build.py $TARGETS $SOURCES')
env = Environment(BUILDERS={'B': B}, tools=[])
env.B(target='f1.out', source='f1.in')
env.B(target='f2.out', source='f2.in')
env.B(target='subdir/f3.out', source='subdir/f3.in')
env.B(target='subdir/f4.out', source='subdir/f4.in')
""" % locals())
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.write(['subdir', 'f3.in'], "subdir/f3.in\n")
test.write(['subdir', 'f4.in'], "subdir/f4.in\n")
test.run()
test.must_exist(test.workpath('.sconsign.dblite'))
test.must_not_exist(test.workpath('.sconsign'))
test.must_not_exist(test.workpath('subdir', '.sconsign'))
test.must_match('f1.out', "f1.in\n")
test.must_match('f2.out', "f2.in\n")
test.must_match(['subdir', 'f3.out'], "subdir/f3.in\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\n")
test.up_to_date(arguments='.')
test.must_exist(test.workpath('.sconsign.dblite'))
test.must_not_exist(test.workpath('.sconsign'))
test.must_not_exist(test.workpath('subdir', '.sconsign'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 32.02381
| 77
| 0.722677
|
ada1ca7f8387eb79996255c875d78da190d72f4e
| 9,630
|
py
|
Python
|
test/functional/wallet_import_rescan.py
|
Dollar-coin/Dollar
|
4b84e5d14408f3985d527aaccac21472b47c91d5
|
[
"MIT"
] | 1
|
2021-02-06T22:18:29.000Z
|
2021-02-06T22:18:29.000Z
|
test/functional/wallet_import_rescan.py
|
Dollar-coin/Dollar
|
4b84e5d14408f3985d527aaccac21472b47c91d5
|
[
"MIT"
] | 1
|
2021-02-07T00:57:29.000Z
|
2021-02-07T10:22:29.000Z
|
test/functional/wallet_import_rescan.py
|
Dollar-coin/Dollar
|
4b84e5d14408f3985d527aaccac21472b47c91d5
|
[
"MIT"
] | 1
|
2021-02-26T22:29:45.000Z
|
2021-02-26T22:29:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends DOLLAR to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more DOLLAR to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import DollarTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, address=self.address["address"], label=self.label, rescan=rescan)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, pubkey=self.address["pubkey"], label=self.label, rescan=rescan)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, privkey=self.key, label=self.label, rescan=rescan)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that listtransactions/listreceivedbyaddress return expected values."""
txs = self.node.listtransactions(label=self.label, count=10000, skip=0, include_watchonly=True)
assert_equal(len(txs), self.expected_txs)
addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["label"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], confirmations)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
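# With 2 call styles x 3 key/data types x 3 rescan modes x 2 prune settings, the
# product above yields 36 import variants.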
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(DollarTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
extra_args = [["-addresstype=legacy"] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args=extra_args)
# Import keys
self.start_nodes(extra_args=[[]] * self.num_nodes)
super().import_deterministic_coinbase_privkeys()
self.stop_nodes()
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def import_deterministic_coinbase_privkeys(self):
pass
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 46.97561
| 130
| 0.664486
|
9bd0c7c8f9266ded85b09693ed8772b189718a99
| 2,592
|
py
|
Python
|
operations.py
|
wesleydutrads/cash-machine-python
|
7c6f26a9c290c83efc82d2c68f02cb5d2fca54ba
|
[
"MIT"
] | null | null | null |
operations.py
|
wesleydutrads/cash-machine-python
|
7c6f26a9c290c83efc82d2c68f02cb5d2fca54ba
|
[
"MIT"
] | 1
|
2021-07-01T02:19:12.000Z
|
2021-07-01T02:19:12.000Z
|
operations.py
|
wesleydutrads/cash-machine-python
|
7c6f26a9c290c83efc82d2c68f02cb5d2fca54ba
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import getpass
from bank_account_variables import account_lists, money_slips
def do_operation(option_typed, account_auth):
if option_typed == '1':
show_balance(account_auth)
elif option_typed == '10' and account_lists[account_auth]['admin']:
insert_money_slips()
elif option_typed == '2':
withdraw(account_auth)
def show_balance(account_auth):
print("****************************************")
print('Seu saldo é %s' % account_lists[account_auth]['value'])
print("****************************************")
def insert_money_slips():
amount_typed = input('Digite a quantidade de cédulas: ')
money_bill_typed = input('Digite a cédula a ser incluída: ')
money_slips[money_bill_typed] += int(amount_typed)
print(money_slips)
def withdraw(account_auth):
value_typed = input('Digite o valor a ser Sacado: ')
money_slips_user = {}
value_int = int(value_typed)
# if value_int // 100 > 0 and value_int // 100 <= money_slips['100']:
if 0 < value_int // 100 <= money_slips['100']:
money_slips_user['100'] = value_int // 100
value_int = value_int - value_int // 100 * 100
if 0 < value_int // 50 <= money_slips['50']:
money_slips_user['50'] = value_int // 50
value_int = value_int - value_int // 50 * 50
if 0 < value_int // 20 <= money_slips['20']:
money_slips_user['20'] = value_int // 20
value_int = value_int - value_int // 20 * 20
if value_int != 0 and account_lists[account_auth]['value'] >= int(value_typed):
print('O caixa não tem cédulas disponíveis para este valor')
else:
for money_bill in money_slips_user:
money_slips[money_bill] -= money_slips_user[money_bill]
account_lists[account_auth]['value'] -= int(value_typed)
print('Pegue as notas: %s' % money_slips_user)
print("****************************************")
print('Seu saldo é %s' % account_lists[account_auth]['value'])
print("****************************************")
def auth_account():
account_typed = input('Digite sua conta: ')
password_typed = getpass.getpass('Digite sua senha: ')
if account_typed in account_lists and password_typed == account_lists[account_typed]['password']:
return account_typed
else:
return False
def get_menu_options_typed(account_auth):
print("1 - Saldo")
print("2 - Saque")
if account_lists[account_auth]['admin']:
print("10 - Incluir Cédulas")
return input('Escolha uma das opções acima: ')
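# Illustrative run of withdraw() (hypothetical account with sufficient balance and
# a full bill stock): a request for 180 is decomposed into one 100, one 50 and one
# 20, leaving a remainder of 10 that no available bill covers, so the machine
# reports that it cannot serve this amount and nothing is dispensed or debited.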
| 35.027027
| 101
| 0.616127
|
13b86f573f79e93c160b136be6a1b2917cfe5408
| 3,025
|
py
|
Python
|
Gena/map_center_object_no_zoom.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | 1
|
2020-12-23T16:26:05.000Z
|
2020-12-23T16:26:05.000Z
|
Gena/map_center_object_no_zoom.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | null | null | null |
Gena/map_center_object_no_zoom.py
|
guy1ziv2/earthengine-py-notebooks
|
931f57c61c147fe6cff745c2a099a444716e69e4
|
[
"MIT"
] | 1
|
2020-12-23T16:29:51.000Z
|
2020-12-23T16:29:51.000Z
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Gena/map_center_object_no_zoom.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Gena/map_center_object_no_zoom.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Gena/map_center_object_no_zoom.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Gena/map_center_object_no_zoom.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# get a single feature
countries = ee.FeatureCollection("USDOS/LSIB_SIMPLE/2017")
country = countries.filter(ee.Filter.eq('country_na', 'Ukraine'))
Map.addLayer(country, { 'color': 'orange' }, 'feature collection layer')
# TEST: center feature on a map
Map.centerObject(country)
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
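# %%
'''
If this script is run outside a notebook, the map object above is not rendered
automatically. One option (an optional extra step using folium's own `save`
method) is to write the interactive map to an HTML file and open it in a browser.
'''
# %%
# Map.save('map.html')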
| 37.345679
| 422
| 0.734215
|
e945868dbc65301875db5c13f49865b3873fa351
| 4,774
|
py
|
Python
|
tests/test_rocket.py
|
KianPardoe/RocketPy
|
d3d835fa7fd71fea636132616b2eea8917729875
|
[
"MIT"
] | 70
|
2018-11-28T10:53:43.000Z
|
2020-10-24T15:48:58.000Z
|
tests/test_rocket.py
|
RogueXT/RocketPy
|
94fc5bc99442526f6f7890172764838883d5cd11
|
[
"MIT"
] | 33
|
2018-09-10T02:28:25.000Z
|
2020-10-19T17:05:13.000Z
|
tests/test_rocket.py
|
RogueXT/RocketPy
|
94fc5bc99442526f6f7890172764838883d5cd11
|
[
"MIT"
] | 23
|
2018-11-30T22:59:50.000Z
|
2020-10-24T16:22:21.000Z
|
from unittest.mock import patch
import pytest
from rocketpy import Environment, SolidMotor, Rocket, Flight, Parachute
@patch("matplotlib.pyplot.show")
def test_rocket(mock_show):
test_motor = SolidMotor(
thrustSource="data/motors/Cesaroni_M1670.eng",
burnOut=3.9,
grainNumber=5,
grainSeparation=5 / 1000,
grainDensity=1815,
grainOuterRadius=33 / 1000,
grainInitialInnerRadius=15 / 1000,
grainInitialHeight=120 / 1000,
nozzleRadius=33 / 1000,
throatRadius=11 / 1000,
interpolationMethod="linear",
)
test_rocket = Rocket(
motor=test_motor,
radius=127 / 2000,
mass=19.197 - 2.956,
inertiaI=6.60,
inertiaZ=0.0351,
distanceRocketNozzle=-1.255,
distanceRocketPropellant=-0.85704,
powerOffDrag="data/calisto/powerOffDragCurve.csv",
powerOnDrag="data/calisto/powerOnDragCurve.csv",
)
test_rocket.setRailButtons([0.2, -0.5])
NoseCone = test_rocket.addNose(
length=0.55829, kind="vonKarman", distanceToCM=0.71971
)
FinSet = test_rocket.addFins(
4, span=0.100, rootChord=0.120, tipChord=0.040, distanceToCM=-1.04956
)
Tail = test_rocket.addTail(
topRadius=0.0635, bottomRadius=0.0435, length=0.060, distanceToCM=-1.194656
)
def drogueTrigger(p, y):
# p = pressure
# y = [x, y, z, vx, vy, vz, e0, e1, e2, e3, w1, w2, w3]
# activate drogue when vz < 0 m/s.
return True if y[5] < 0 else False
def mainTrigger(p, y):
# p = pressure
# y = [x, y, z, vx, vy, vz, e0, e1, e2, e3, w1, w2, w3]
# activate main when vz < 0 m/s and z < 800 m.
return True if y[5] < 0 and y[2] < 800 else False
Main = test_rocket.addParachute(
"Main",
CdS=10.0,
trigger=mainTrigger,
samplingRate=105,
lag=1.5,
noise=(0, 8.3, 0.5),
)
Drogue = test_rocket.addParachute(
"Drogue",
CdS=1.0,
trigger=drogueTrigger,
samplingRate=105,
lag=1.5,
noise=(0, 8.3, 0.5),
)
static_margin = test_rocket.staticMargin(0)
assert test_rocket.allInfo() == None or not abs(static_margin - 2.05) < 0.01
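# For reference, the 2.05 checked above is the static margin in calibers: the
# distance between the rocket's centre of pressure and centre of gravity divided
# by the body diameter (127 mm here), with roughly 1-2 calibers a commonly quoted
# rule of thumb for a stable design.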
@patch("matplotlib.pyplot.show")
def test_airfoil(mock_show):
test_motor = SolidMotor(
thrustSource="data/motors/Cesaroni_M1670.eng",
burnOut=3.9,
grainNumber=5,
grainSeparation=5 / 1000,
grainDensity=1815,
grainOuterRadius=33 / 1000,
grainInitialInnerRadius=15 / 1000,
grainInitialHeight=120 / 1000,
nozzleRadius=33 / 1000,
throatRadius=11 / 1000,
interpolationMethod="linear",
)
test_rocket = Rocket(
motor=test_motor,
radius=127 / 2000,
mass=19.197 - 2.956,
inertiaI=6.60,
inertiaZ=0.0351,
distanceRocketNozzle=-1.255,
distanceRocketPropellant=-0.85704,
powerOffDrag="data/calisto/powerOffDragCurve.csv",
powerOnDrag="data/calisto/powerOnDragCurve.csv",
)
test_rocket.setRailButtons([0.2, -0.5])
NoseCone = test_rocket.addNose(
length=0.55829, kind="vonKarman", distanceToCM=0.71971
)
FinSetNACA = test_rocket.addFins(
2,
span=0.100,
rootChord=0.120,
tipChord=0.040,
distanceToCM=-1.04956,
airfoil=("tests/fixtures/airfoils/NACA0012-radians.txt", "radians"),
)
FinSetE473 = test_rocket.addFins(
2,
span=0.100,
rootChord=0.120,
tipChord=0.040,
distanceToCM=-1.04956,
airfoil=("tests/fixtures/airfoils/e473-10e6-degrees.csv", "degrees"),
)
Tail = test_rocket.addTail(
topRadius=0.0635, bottomRadius=0.0435, length=0.060, distanceToCM=-1.194656
)
def drogueTrigger(p, y):
# p = pressure
# y = [x, y, z, vx, vy, vz, e0, e1, e2, e3, w1, w2, w3]
# activate drogue when vz < 0 m/s.
return True if y[5] < 0 else False
def mainTrigger(p, y):
# p = pressure
# y = [x, y, z, vx, vy, vz, e0, e1, e2, e3, w1, w2, w3]
# activate main when vz < 0 m/s and z < 800 m.
return True if y[5] < 0 and y[2] < 800 else False
Main = test_rocket.addParachute(
"Main",
CdS=10.0,
trigger=mainTrigger,
samplingRate=105,
lag=1.5,
noise=(0, 8.3, 0.5),
)
Drogue = test_rocket.addParachute(
"Drogue",
CdS=1.0,
trigger=drogueTrigger,
samplingRate=105,
lag=1.5,
noise=(0, 8.3, 0.5),
)
static_margin = test_rocket.staticMargin(0)
assert test_rocket.allInfo() == None or not abs(static_margin - 2.03) < 0.01
| 28.248521
| 83
| 0.585253
|
54cd0426afae3365418d6cfc3c2f4f9c5811674a
| 7,843
|
py
|
Python
|
data_import_scripts/sqlalchemy_import.py
|
anapenedos/PhosQuest
|
5a0b363b55850b25f67382b3fa4d227b3ec1c2df
|
[
"Apache-2.0"
] | 2
|
2019-06-08T10:30:53.000Z
|
2021-06-29T13:58:56.000Z
|
data_import_scripts/sqlalchemy_import.py
|
anapenedos/PhosQuest
|
5a0b363b55850b25f67382b3fa4d227b3ec1c2df
|
[
"Apache-2.0"
] | null | null | null |
data_import_scripts/sqlalchemy_import.py
|
anapenedos/PhosQuest
|
5a0b363b55850b25f67382b3fa4d227b3ec1c2df
|
[
"Apache-2.0"
] | 1
|
2021-06-21T22:11:04.000Z
|
2021-06-21T22:11:04.000Z
|
# Standard library imports
from sqlalchemy.inspection import inspect
from datetime import datetime, timedelta
from pandas import isnull
# project imports
from PhosQuest_app.data_access.db_sessions import import_session_maker
from PhosQuest_app.data_access.class_functions import get_classes_key_attrs
# define null-type of values that are treated differently
NULL_VALS = [None, '', ' ', '-', 'nan', 'NaN']
def get_key_vals(df_to_class_dict, classes_keys, row):
"""
Gets the key values for the class instances in a data frame row.
:param df_to_class_dict: data frame heading to class & attribute (dict)
{'DF header': [(Class, 'class_attribute')]}
:param classes_keys: {Class: ['key_attr1', ...], ...} (dict)
:param row: pandas data frame row (df row)
:return: key values for the class instances in the row (dict)
{class: {key_attr: key_value, ...}, ...}
"""
# get keys for classes in row
# dictionary of class to primary key attributes and key values tuples
new_table_keys = {} # {class: {key_attr: key_value, ...}, ...}
# iterate through dict mapping df_heading: (Class, class_attr)
for df_heading, class_matches in df_to_class_dict.items():
for class_match in class_matches:
# df heading corresponds to class and class attribute
class_name = class_match[0]
class_attr = class_match[1]
# if the row contains a non-null value and the df heading
# contains a primary key, add key value to dict
if (class_attr in classes_keys[class_name]
and row[df_heading] not in NULL_VALS
and not isnull(row[df_heading])):
new_values = new_table_keys.setdefault(class_name, {})
new_values[class_attr] = row[df_heading]
return new_table_keys
def get_instances(new_table_keys, session):
"""
Check if records already exist in tables and obtain class instances. Create
new instances if not in tables.
:param new_table_keys: key values for the class instances in the row (dict)
{class: {key_attr: key_value, ...}, ...}
:param session: a sqlalchemy DB session (sqlalchemy session)
:return: dictionary of instance for each class (dict)
{Class: class_instance, ...}
"""
# check if records already exist in tables and obtain class instances
class_instances = {} # {Class: class_instance, ...}
for class_name, keys_info in new_table_keys.items():
# create query object
query_res = session.query(class_name)
# apply filters to the query_res based on primary keys
for key_attr, key_value in keys_info.items():
query_res = query_res.filter(
getattr(class_name, key_attr) == key_value)
# given query_res was filtered on all primary keys in table, it
# should now list a single instance, which can be obtained with
# .first
query_res = query_res.first()
# create new class instance if no record retrieved by query
# and none of the key values is None
if query_res is None:
class_instance = class_name(**keys_info)
session.add(class_instance)
# or get the existing class instance if already in table
else:
class_instance = query_res
# keep track of the new instances
class_instances[class_name] = class_instance
return class_instances
def import_attrs(class_instances, df_to_class_dict, row):
"""
Get instance attributes for each instance from a pandas data frame row.
:param class_instances: dictionary of instance for each class (dict)
{Class: class_instance, ...}
:param df_to_class_dict: data frame heading to class & attribute (dict)
{'DF header': [(Class, 'class_attribute')]}
:param row: pandas data frame row (df row)
:return: updates class_instances dict (None)
"""
# get remaining attributes for each instance
for instance_class_name, class_instance in class_instances.items():
# get the class attributes
for df_heading, class_matches in df_to_class_dict.items():
for class_match in class_matches:
class_name = class_match[0]
class_attr = class_match[1]
if class_name == instance_class_name:
attr = getattr(class_instance, class_attr, None)
# if the existing instance attr is not defined, set it
# to the value in the data frame
if attr in NULL_VALS:
setattr(class_instance, class_attr, row[df_heading])
return
def import_data_from_data_frame(df, df_to_class_dict):
"""
Takes in data frame storing kinase substrate info and populates relevant
entries in SQLite database.
:param df: pandas data frame from PhosphositePlus import (df)
:param df_to_class_dict: data frame heading to class & attribute (dict)
{'DF header': [(Class, 'class_attribute')]}
"""
start_time = datetime.now()
print('Started processing data frame\n%s\n'
          'Current time: %s'
% (df.head(3), start_time.strftime("%d-%m-%Y %H:%M:%S")))
# get classes in data frame from dict function argument
classes_in_df = set()
for df_heading, class_matches in df_to_class_dict.items():
for class_match in class_matches:
class_name = class_match[0]
classes_in_df.add(class_name)
# get classes primary keys attributes
# Class: ['key_attr1', ...]
classes_keys = get_classes_key_attrs(classes_in_df)
# set up a row counter
processed_rows = 0
total_records = len(df)
# Create session maker
DBSession = import_session_maker()
# iterate through the data frame rows (of the data frame containing data to
# import) to:
# 1. set up new instances of classes or retrieve existing instances
# from the db
# 2. populate instance class attributes from data frame data
# 3. generate relationships between instances of different classes
for index, row in df.iterrows():
# Issue print statement every 1000 records
if processed_rows % 1000 == 0:
print('Processing row %i of %i rows in data frame'
% (processed_rows, total_records))
# open a SQLite session
session = DBSession()
# get keys for classes in row
# dictionary of class to primary key attributes and key values tuples
# {class: {key_attr: key_value, ...}, ...}
new_table_keys = get_key_vals(df_to_class_dict, classes_keys, row)
# check if records already exist in tables and obtain class instances
# {Class: class_instance, ...}
class_instances = get_instances(new_table_keys, session)
# get remaining attributes for each instance
import_attrs(class_instances, df_to_class_dict, row)
# if more than one class in the data frame, set up relationships
if len(classes_in_df) > 1:
for class_instance in class_instances.values():
class_instance.add_relationships(class_instances)
# commit the new/updated objects to the DB
session.commit()
session.close()
# update row counter
processed_rows += 1
end_time = datetime.now()
elapsed_time = end_time - start_time
print('Completed processing %i records in data frame\n%s\n'
'Current time: %s\n'
'Time elapsed: %s\n'
% (total_records, df.head(3),
end_time.strftime("%d-%m-%Y %H:%M:%S"),
timedelta(days=elapsed_time.days, seconds=elapsed_time.seconds)))
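# Illustrative shape of the mapping argument (class and column names here are
# hypothetical, not taken from the real PhosQuest schema):
#   df_to_class_dict = {
#       'KINASE_ACC': [(Kinase, 'kin_accession')],
#       'SUB_ACC': [(Substrate, 'subs_accession')],
#   }
# import_data_from_data_frame(kin_sub_df, df_to_class_dict) would then create or
# update one Kinase and one Substrate instance per row and link them via
# add_relationships.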
| 42.857923
| 79
| 0.647074
|
c830d963bcc1f08caf5fac43417c5575986502aa
| 609
|
py
|
Python
|
app/make_db.py
|
brighaus/tarot-tracker
|
32040d7f16362a1463d677fa0f52db56d5f5ad1e
|
[
"MIT"
] | null | null | null |
app/make_db.py
|
brighaus/tarot-tracker
|
32040d7f16362a1463d677fa0f52db56d5f5ad1e
|
[
"MIT"
] | null | null | null |
app/make_db.py
|
brighaus/tarot-tracker
|
32040d7f16362a1463d677fa0f52db56d5f5ad1e
|
[
"MIT"
] | null | null | null |
__author__ = 'brighamhausman'
import app.init_data as idt
import app.db_config as db_cfg
import shelve
import uuid
def dump_db(storage_targ=db_cfg.DB_SHELF):
status = []
dbsrc = idt.get_db(idt.__RAW_FILE_TARGET__)
status.append('looping db' + str(dbsrc))
with shelve.open(storage_targ) as db:
for entry in dbsrc:
id = str(uuid.uuid4())
db[id] = entry
status.append('done looping db')
return status
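# Editor's note (added comment): each entry is stored under a freshly
# generated UUID string, so repeated calls to dump_db() append new copies of
# the raw data rather than overwriting earlier ones.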
def dump_test_db():
return dump_db(db_cfg.DB_SHELF_TEST)
if __name__ == '__main__':
db_status = dump_db()
print('db status', db_status)
| 25.375
| 47
| 0.673235
|
3e478dd662f6f9024d19d506f3ca22bdc6acc882
| 912
|
py
|
Python
|
formation/views.py
|
ahemery/formation_usagers
|
eb50682db798922ec393050624c4cbd3bb23675a
|
[
"Apache-2.0"
] | null | null | null |
formation/views.py
|
ahemery/formation_usagers
|
eb50682db798922ec393050624c4cbd3bb23675a
|
[
"Apache-2.0"
] | null | null | null |
formation/views.py
|
ahemery/formation_usagers
|
eb50682db798922ec393050624c4cbd3bb23675a
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
def login_view(request):
return render(request, 'index.html',
{
})
def logout_view(request):
return render(request, 'index.html',
{
})
def index_view(request):
return render(request, 'index.html',
{
})
def formation_view(request, formation=None):
return render(request, 'formation/index.html',
{
})
def formationAdd_view(request):
return render(request, 'formation/add.html',
{
})
def seance_view(request, seance):
return render(request, 'seance.html',
{
})
def stat_view(request):
return render(request, 'stat.html',
{
})
def admin_view(request):
return render(request, 'admin.html',
{
})
| 10.857143
| 50
| 0.506579
|
ecdaac621afb412857435cc94d189f6ed7e9b577
| 2,728
|
py
|
Python
|
tests/zeus/utils/test_builds.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 221
|
2017-07-03T17:29:21.000Z
|
2021-12-07T19:56:59.000Z
|
tests/zeus/utils/test_builds.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 298
|
2017-07-04T18:08:14.000Z
|
2022-03-03T22:24:51.000Z
|
tests/zeus/utils/test_builds.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 24
|
2017-07-15T13:46:45.000Z
|
2020-08-16T16:14:45.000Z
|
from datetime import timedelta
from zeus import factories
from zeus.constants import Status, Result
from zeus.utils import timezone
from zeus.utils.builds import fetch_build_for_revision, merge_build_group
def test_merge_build_group_different_providers(client, default_login, default_revision):
now = timezone.now()
later = now + timedelta(minutes=1)
build1 = factories.BuildFactory.create(
revision=default_revision,
provider="provider1",
date_started=now,
date_finished=now,
)
build2 = factories.BuildFactory.create(
revision=default_revision,
provider="provider2",
date_started=later,
date_finished=later,
)
merged_build = merge_build_group([build1, build2])
assert merged_build.ref == build1.ref
assert merged_build.revision_sha is build1.revision_sha
assert merged_build.label == build1.label
assert merged_build.original == [build1, build2]
assert merged_build.status == Status(max(build1.status.value, build2.status.value))
assert merged_build.result == Result(max(build1.result.value, build2.result.value))
assert merged_build.date_started == now
assert merged_build.date_finished == later
def test_merge_build_group_empty_dates(client, default_login, default_revision):
now = timezone.now()
build1 = factories.BuildFactory.create(
revision=default_revision,
provider="provider1",
date_started=now,
date_finished=now,
)
build2 = factories.BuildFactory.create(
revision=default_revision,
provider="provider2",
date_started=None,
date_finished=None,
)
merged_build = merge_build_group([build1, build2])
assert merged_build.date_started == now
assert merged_build.date_finished == now
def test_fetch_build_with_required_hooks(
client, db_session, default_login, default_tenant, default_repo, default_revision
):
hook1 = factories.HookFactory.create(repository_id=default_repo.id)
hook2 = factories.HookFactory.create(repository_id=default_repo.id)
db_session.commit()
factories.BuildFactory.create(
revision=default_revision,
data={"required_hook_ids": [str(hook1.id), str(hook2.id)]},
hook_id=hook1.id,
passed=True,
)
merged_build = fetch_build_for_revision(default_revision)
assert merged_build.result == Result.failed
factories.BuildFactory.create(
revision=default_revision,
data={"required_hook_ids": [str(hook1.id), str(hook2.id)]},
hook_id=hook2.id,
passed=True,
)
merged_build = fetch_build_for_revision(default_revision)
assert merged_build.result == Result.passed
| 31.72093
| 88
| 0.719575
|
5cd51578386d122ba218a5538c2d40af15721e00
| 1,612
|
py
|
Python
|
tests/integration/test_termination.py
|
jobvs/cf-mendix-buildpack
|
7df5585b5ac8550fd36d21c9d354d74489ff78c0
|
[
"Apache-2.0"
] | 1
|
2022-01-31T09:56:06.000Z
|
2022-01-31T09:56:06.000Z
|
tests/integration/test_termination.py
|
jobvs/cf-mendix-buildpack
|
7df5585b5ac8550fd36d21c9d354d74489ff78c0
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_termination.py
|
jobvs/cf-mendix-buildpack
|
7df5585b5ac8550fd36d21c9d354d74489ff78c0
|
[
"Apache-2.0"
] | 5
|
2018-12-17T09:45:17.000Z
|
2020-11-17T22:08:10.000Z
|
from tests.integration import basetest
class TestCaseTermination(basetest.BaseTest):
    # Tests that the process terminates with a stack trace when Python code
    # errors. The S3_ENCRYPTION_KEYS environment variable is used here; it
    # doesn't have a try-except around it.
    # TODO determine if we can unit test this / should test this
def test_termination_stacktrace(self):
self.stage_container(
"Mendix8.1.1.58432_StarterApp.mda",
env_vars={"S3_ENCRYPTION_KEYS": "{invalid-json}"},
)
with self.assertRaises(RuntimeError):
self.start_container()
self.assert_string_in_recent_logs(
'json.loads(os.getenv("S3_ENCRYPTION_KEYS"))'
)
def test_termination_broken_application(self):
self.stage_container(
"Sample-StartError-7.23.2.mda",
env_vars={
"DEPLOY_PASSWORD": self._mx_password,
"METRICS_INTERVAL": "10",
},
)
self.start_container(status="unhealthy")
self.assert_string_in_recent_logs("start failed, stopping")
self.assert_string_not_in_recent_logs("health check never passed")
def test_java_crash_triggers_unhealthy(self):
self.stage_container(
"sample-6.2.0.mda",
env_vars={
"DEPLOY_PASSWORD": self._mx_password,
"METRICS_INTERVAL": "10",
},
)
self.start_container()
self.assert_app_running()
self.run_on_container("killall java")
assert self.await_container_status("unhealthy", 60)
| 35.043478
| 75
| 0.630273
|
16547a8e0be6976ee3b841603319a84d7caa0d25
| 117
|
py
|
Python
|
peek/__init__.py
|
ywangd/peek
|
25d196b614acaf9c2f9fe4b8fea36a06554950cd
|
[
"MIT"
] | 16
|
2020-08-31T02:06:23.000Z
|
2022-01-31T23:56:44.000Z
|
peek/__init__.py
|
ywangd/peek
|
25d196b614acaf9c2f9fe4b8fea36a06554950cd
|
[
"MIT"
] | 97
|
2020-08-27T14:51:32.000Z
|
2021-10-21T00:19:31.000Z
|
peek/__init__.py
|
ywangd/peek
|
25d196b614acaf9c2f9fe4b8fea36a06554950cd
|
[
"MIT"
] | 1
|
2021-02-07T13:10:38.000Z
|
2021-02-07T13:10:38.000Z
|
"""Top-level package for peek."""
__author__ = """Yang Wang"""
__email__ = 'ywangd@gmail.com'
__version__ = '0.2.2'
| 19.5
| 33
| 0.65812
|
fb761cbbed13568f9fc7086989be6108735dbef2
| 113,854
|
py
|
Python
|
pywikibot/date.py
|
luzpaz/pywikibot
|
c2f1a2b7a972389d01be4fd0c76cb0677be8011a
|
[
"MIT"
] | null | null | null |
pywikibot/date.py
|
luzpaz/pywikibot
|
c2f1a2b7a972389d01be4fd0c76cb0677be8011a
|
[
"MIT"
] | null | null | null |
pywikibot/date.py
|
luzpaz/pywikibot
|
c2f1a2b7a972389d01be4fd0c76cb0677be8011a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Date data and manipulation module."""
#
# (C) Rob W.W. Hooft, 2003
# (C) Daniel Herding, 2004
# (C) Ævar Arnfjörð Bjarmason, 2004
# (C) Andre Engels, 2004-2005
# (C) Yuri Astrakhan, 2005-2006 (<Firstname><Lastname>@gmail.com)
# (years/decades/centuries/millenniums str <=> int conversions)
# (C) Pywikibot team, 2004-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import calendar
import datetime
import re
from string import digits as _decimalDigits # noqa: N812
from pywikibot.textlib import NON_LATIN_DIGITS
from pywikibot.tools import first_lower, first_upper, deprecated, UnicodeType
#
# Different collections of well known formats
#
enMonthNames = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
dayMnthFmts = ['Day_' + str(s) for s in enMonthNames] # e.g. 'Day_January'
yrMnthFmts = ['Year_' + str(s) for s in enMonthNames] # e.g. 'Year_January'
# the order of these lists is important
adDateFormats = ['YearAD', 'DecadeAD', 'CenturyAD', 'MillenniumAD']
bcDateFormats = ['YearBC', 'DecadeBC', 'CenturyBC', 'MillenniumBC']
dateFormats = bcDateFormats + adDateFormats
decadeFormats = ['DecadeAD', 'DecadeBC']
centuryFormats = ['CenturyAD', 'CenturyBC']
yearFormats = ['YearAD', 'YearBC']
millFormats = ['MillenniumAD', 'MillenniumBC']
snglValsFormats = ['CurrEvents']
def multi(value, tuplst):
"""
Run multiple pattern checks for the same entry.
For example: 1st century, 2nd century, etc.
The tuplst is a list of tuples. Each tuple must contain two functions:
first to encode/decode a single value (e.g. simpleInt), second is a
predicate function with an integer parameter that returns true or false.
When the 2nd function evaluates to true, the 1st function is used.
"""
if isinstance(value, UnicodeType):
# Try all functions, and test result against predicates
for func, pred in tuplst:
try:
res = func(value)
if pred(res):
return res
except Exception:
pass
else:
# Find a predicate that gives true for this int value, and run a
# function
for func, pred in tuplst:
if pred(value):
return func(value)
raise ValueError('could not find a matching function')
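# Editor's example (added comment, not in the original source): the English
# 'CenturyAD' entry defined below wires multi() to ordinal patterns, so the
# same callable converts in both directions:
#     formats['CenturyAD']['en'](20)            -> '20th century'
#     formats['CenturyAD']['en']('3rd century') -> 3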
#
# Helper functions that aid with single value no corrections encoding/decoding.
# Various filters are item dependent.
#
def dh_noConv(value, pattern, limit):
"""Helper for decoding an integer value, no conversion, no rounding."""
return dh(value, pattern, lambda i: i, decSinglVal, limit)
def dh_dayOfMnth(value, pattern):
"""
Helper for decoding a single integer value.
The single integer should be <=31, no conversion,
no rounding (used in days of month).
"""
# For now use January because it has all 31 days
return dh_noConv(value, pattern, formatLimits[dayMnthFmts[0]][0])
def dh_mnthOfYear(value, pattern):
"""
Helper for decoding a single integer value.
The value should be >=1000, no conversion,
no rounding (used in month of the year)
"""
return dh_noConv(value, pattern, _formatLimit_MonthOfYear[0])
def dh_decAD(value, pattern):
"""
Helper for decoding a single integer value.
    No conversion; rounds down to the decade (used in decades).
"""
return dh(value, pattern, encDec0, decSinglVal,
formatLimits['DecadeAD'][0])
def dh_decBC(value, pattern):
"""
Helper for decoding a single integer value.
    No conversion; rounds down to the decade (used in decades).
"""
return dh(value, pattern, encDec0, decSinglVal,
formatLimits['DecadeBC'][0])
def dh_yearBC(value, pattern):
"""Helper for decoding a year value.
    The value should have no conversion, no rounding, limited to 3000.
"""
return dh_noConv(value, pattern, formatLimits['YearBC'][0])
def dh_yearAD(value, pattern):
"""Helper for decoding a year value.
    The value should have no conversion, no rounding, limited to 3000.
"""
return dh_noConv(value, pattern, formatLimits['YearAD'][0])
def dh_simpleYearAD(value):
"""Helper for decoding a single integer value.
This value should be representing a year with no extra symbols.
"""
return dh_yearAD(value, '%d')
def dh_number(value, pattern):
"""Helper for decoding a number."""
return dh_noConv(value, pattern, formatLimits['Number'][0])
def dh_centuryAD(value, pattern):
"""Helper for decoding an AD century."""
return dh_noConv(value, pattern, formatLimits['CenturyAD'][0])
def dh_centuryBC(value, pattern):
"""Helper for decoding an BC century."""
return dh_noConv(value, pattern, formatLimits['CenturyBC'][0])
def dh_millenniumAD(value, pattern):
"""Helper for decoding an AD millennium."""
return dh_noConv(value, pattern, formatLimits['MillenniumAD'][0])
def dh_millenniumBC(value, pattern):
"""Helper for decoding an BC millennium."""
return dh_noConv(value, pattern, formatLimits['MillenniumBC'][0])
def decSinglVal(v):
"""Return first item in list v."""
return v[0]
@deprecated(since='20151014')
def encNoConv(i):
"""Return i."""
return i
def encDec0(i):
"""Round to the nearest decade, decade starts with a '0'-ending year."""
return (i // 10) * 10
def encDec1(i):
"""Round to the nearest decade, decade starts with a '1'-ending year."""
return encDec0(i) + 1
def slh(value, lst):
"""Helper function for simple list value matching.
!!!!! The index starts at 1, so 1st element has index 1, not 0 !!!!!
Usually it will be used as a lambda call in a map::
lambda v: slh(v, ['January','February',...])
Usage scenarios::
formats['MonthName']['en'](1) => 'January'
formats['MonthName']['en']('January') => 1
formats['MonthName']['en']('anything else') => raise ValueError
"""
if isinstance(value, UnicodeType):
return lst.index(value) + 1
else:
return lst[value - 1]
def dh_singVal(value, match):
"""Helper function to match a single value to a constant."""
return dh_constVal(value, 0, match)
def dh_constVal(value, ind, match):
"""Helper function to match a single value to a constant.
formats['CurrEvents']['en'](ind) => 'Current Events'
formats['CurrEvents']['en']('Current Events') => ind
"""
if isinstance(value, UnicodeType):
if value == match:
return ind
else:
raise ValueError()
else:
if value == ind:
return match
else:
raise ValueError('unknown value %d' % value)
def alwaysTrue(x):
"""
Return True, always.
    Used by the multiple-value selection function to accept all other values.
@param x: not used
@return: True
@rtype: bool
"""
return True
def monthName(lang, ind):
"""Return the month name for a language."""
return formats['MonthName'][lang](ind)
# Helper for KN: digits representation
_knDigits = NON_LATIN_DIGITS['kn']
_knDigitsToLocal = {ord(UnicodeType(i)): _knDigits[i] for i in range(10)}
_knLocalToDigits = {ord(_knDigits[i]): UnicodeType(i) for i in range(10)}
# Helper for Urdu/Persian languages
_faDigits = NON_LATIN_DIGITS['fa']
_faDigitsToLocal = {ord(UnicodeType(i)): _faDigits[i] for i in range(10)}
_faLocalToDigits = {ord(_faDigits[i]): UnicodeType(i) for i in range(10)}
# Helper for HI:, MR:
_hiDigits = NON_LATIN_DIGITS['hi']
_hiDigitsToLocal = {ord(UnicodeType(i)): _hiDigits[i] for i in range(10)}
_hiLocalToDigits = {ord(_hiDigits[i]): UnicodeType(i) for i in range(10)}
# Helper for BN:
_bnDigits = NON_LATIN_DIGITS['bn']
_bnDigitsToLocal = {ord(UnicodeType(i)): _bnDigits[i] for i in range(10)}
_bnLocalToDigits = {ord(_bnDigits[i]): UnicodeType(i) for i in range(10)}
# Helper for GU:
_guDigits = NON_LATIN_DIGITS['gu']
_guDigitsToLocal = {ord(UnicodeType(i)): _guDigits[i] for i in range(10)}
_guLocalToDigits = {ord(_guDigits[i]): UnicodeType(i) for i in range(10)}
def intToLocalDigitsStr(value, digitsToLocalDict):
"""Encode an integer value into a textual form."""
return UnicodeType(value).translate(digitsToLocalDict)
def localDigitsStrToInt(value, digitsToLocalDict, localToDigitsDict):
"""Convert digits to integer."""
# First make sure there are no real digits in the string
tmp = value.translate(digitsToLocalDict) # Test
if tmp == value:
return int(value.translate(localToDigitsDict)) # Convert
else:
raise ValueError('string contains regular digits')
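# Editor's example (added comment): intToLocalDigitsStr(1984, _knDigitsToLocal)
# renders 1984 with Kannada digits, and localDigitsStrToInt() reverses the
# mapping; it first checks that the input contains no Latin digits before
# translating back to an int.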
# Helper for roman numerals number representation
_romanNumbers = ['-', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX',
'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI', 'XVII', 'XVIII',
'XIX', 'XX', 'XXI', 'XXII', 'XXIII', 'XXIV', 'XXV', 'XXVI',
'XXVII', 'XXVIII', 'XXIX', 'XXX']
def intToRomanNum(i):
"""Convert integer to roman numeral."""
if i >= len(_romanNumbers):
raise IndexError('Roman value %i is not defined' % i)
return _romanNumbers[i]
def romanNumToInt(v):
"""Convert roman numeral to integer."""
return _romanNumbers.index(v)
# Each tuple must have 3 parts: a list of all possible digits (symbols), an
# encoder (from int to a u-string) and a decoder (from u-string to an int)
_digitDecoders = {
# %% is a %
'%': '%',
# %d is a decimal
'd': (_decimalDigits, UnicodeType, int),
# %R is a roman numeral. This allows for only the simplest linear
# conversions based on a list of numbers
'R': ('IVX', intToRomanNum, romanNumToInt),
# %K is a number in KN::
'K': (_knDigits, lambda v: intToLocalDigitsStr(v, _knDigitsToLocal),
lambda v: localDigitsStrToInt(v, _knDigitsToLocal,
_knLocalToDigits)),
# %F is a number in FA:
'F': (_faDigits, lambda v: intToLocalDigitsStr(v, _faDigitsToLocal),
lambda v: localDigitsStrToInt(v, _faDigitsToLocal,
_faLocalToDigits)),
# %H is a number in HI:
'H': (_hiDigits, lambda v: intToLocalDigitsStr(v, _hiDigitsToLocal),
lambda v: localDigitsStrToInt(v, _hiDigitsToLocal,
_hiLocalToDigits)),
# %B is a number in BN:
'B': (_bnDigits, lambda v: intToLocalDigitsStr(v, _bnDigitsToLocal),
lambda v: localDigitsStrToInt(v, _bnDigitsToLocal,
_bnLocalToDigits)),
# %G is a number in GU:
'G': (_guDigits, lambda v: intToLocalDigitsStr(v, _guDigitsToLocal),
lambda v: localDigitsStrToInt(v, _guDigitsToLocal,
_guLocalToDigits)),
# %T is a year in TH: -- all years are shifted: 2005 => 'พ.ศ. 2548'
'T': (_decimalDigits, lambda v: UnicodeType(v + 543),
lambda v: int(v) - 543),
}
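# Editor's example (added comment): the '%R' decoder pairs intToRomanNum with
# romanNumToInt, so intToRomanNum(4) == 'IV' and romanNumToInt('IV') == 4;
# only the values enumerated in _romanNumbers (up to XXX == 30) are supported.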
# Allows searching for '(%%)|(%d)|(%R)|...', and allows one digit 1-9 to set
# the size of zero-padding for numbers
_reParameters = re.compile('|'.join('(%%[1-9]?%s)' % s
for s in _digitDecoders))
# A map of sitecode+pattern to (re matching object and corresponding decoders)
_escPtrnCache2 = {}
_listTypes = [list, tuple]
def escapePattern2(pattern):
"""
Convert a string pattern into a regex expression and cache.
Allows matching of any _digitDecoders inside the string.
Returns a compiled regex object and a list of digit decoders.
"""
if pattern not in _escPtrnCache2:
newPattern = '^' # beginning of the string
strPattern = ''
decoders = []
for s in _reParameters.split(pattern):
if s is None:
continue
if (len(s) in (2, 3) and s[0] == '%'
and s[-1] in _digitDecoders
and(len(s) == 2 or s[1] in _decimalDigits)):
# Must match a "%2d" or "%d" style
dec = _digitDecoders[s[-1]]
if isinstance(dec, UnicodeType):
# Special case for strings that are replaced instead of
# decoded
assert len(s) < 3, (
'Invalid pattern {0}: Cannot use zero padding size '
'in {1}!'.format(pattern, s))
newPattern += re.escape(dec)
strPattern += s # Keep the original text
else:
if len(s) == 3:
# enforce mandatory field size
newPattern += '([%s]{%s})' % (dec[0], s[1])
# add the number of required digits as the last (4th)
# part of the tuple
dec += (int(s[1]),)
else:
newPattern += '([{}]+)'.format(dec[0])
decoders.append(dec)
# All encoders produce a string
                    # this causes a problem with the zero padding.
# Need to rethink
strPattern += '%s'
else:
newPattern += re.escape(s)
strPattern += s
newPattern += '$' # end of the string
compiledPattern = re.compile(newPattern)
_escPtrnCache2[pattern] = (compiledPattern, strPattern, decoders)
return _escPtrnCache2[pattern]
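# Editor's example (added comment, regex shown approximately):
# escapePattern2('%d год') caches three things: a compiled regex matching
# '^([0-9]+) год$', the re-encoding template '%s год', and the single decimal
# decoder tuple. dh() uses the regex to parse strings and the template to
# rebuild them.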
def dh(value, pattern, encf, decf, filter=None):
"""Function to help with year parsing.
Usually it will be used as a lambda call in a map::
lambda v: dh(v, 'pattern string', encf, decf)
@param encf:
Converts from an integer parameter to another integer or a tuple of
integers. Depending on the pattern, each integer will be converted to a
proper string representation, and will be passed as a format argument
to the pattern::
pattern % encf(value)
This function is a complement of decf.
@param decf:
        Converts a tuple/list of non-negative integers found in the original
        value string into a normalized value. The normalized value can be
        passed right back into dh() to produce the original string. This
        function is a complement of encf. dh() interprets %d as a decimal
        number and %R as a roman numeral.
"""
compPattern, strPattern, decoders = escapePattern2(pattern)
if isinstance(value, UnicodeType):
m = compPattern.match(value)
if m:
# decode each found value using provided decoder
values = [decoder[2](m.group(i + 1))
for i, decoder in enumerate(decoders)]
decValue = decf(values)
assert not isinstance(decValue, UnicodeType), \
'Decoder must not return a string!'
# recursive call to re-encode and see if we get the original
            # (may throw a filter exception)
if value == dh(decValue, pattern, encf, decf, filter):
return decValue
raise ValueError("reverse encoding didn't match")
else:
# Encode an integer value into a textual form.
        # This will be called from outside as well as recursively to verify
# parsed value
if filter and not filter(value):
raise ValueError('value {} is not allowed'.format(value))
params = encf(value)
# name 'MakeParameter' kept to avoid breaking blame below
MakeParameter = _make_parameter
if type(params) in _listTypes:
assert len(params) == len(decoders), (
'parameter count ({0}) does not match decoder count ({1})'
.format(len(params), len(decoders)))
# convert integer parameters into their textual representation
params = [MakeParameter(decoders[i], param)
for i, param in enumerate(params)]
return strPattern % tuple(params)
else:
assert len(decoders) == 1, (
'A single parameter does not match {0} decoders.'
.format(len(decoders)))
# convert integer parameter into its textual representation
return strPattern % MakeParameter(decoders[0], params)
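# Editor's worked example (added comment), mirroring the 'ru' 'YearAD' entry:
#     dh(1945, '%d год', lambda i: i, decSinglVal)        -> '1945 год'
#     dh('1945 год', '%d год', lambda i: i, decSinglVal)  -> 1945
# The string branch decodes the digits, then re-encodes the result to confirm
# the round trip before returning the integer.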
def _make_parameter(decoder, param):
newValue = decoder[1](param)
if len(decoder) == 4 and len(newValue) < decoder[3]:
# force parameter length by taking the first digit in the list and
# repeating it required number of times
# This converts "205" into "0205" for "%4d"
newValue = decoder[0][0] * (decoder[3] - len(newValue)) + newValue
return newValue
@deprecated(since='20151014')
def MakeParameter(decoder, param):
"""DEPRECATED."""
return _make_parameter(decoder, param)
# All years/decades/centuries/millenniums are designed in such a way
# as to allow for easy date to string and string to date conversion.
# For example, using any map with either an integer or a string will produce
# its opposite value:
# Usage scenarios:
# formats['DecadeAD']['en'](1980) => '1980s'
# formats['DecadeAD']['en']('1980s') => 1980
# formats['DecadeAD']['en']('anything else') => raise ValueError
# (or some other exception?)
# This is useful when trying to decide if a certain article is a localized date
# or not, or generating dates.
# See dh() for additional information.
formats = {
'MonthName': {
'af': lambda v: slh(v, ['Januarie', 'Februarie', 'Maart', 'April',
'Mei', 'Junie', 'Julie', 'Augustus',
'September', 'Oktober', 'November',
'Desember']),
'gsw': lambda v: slh(v, ['Januar', 'Februar', 'März', 'April', 'Mai',
'Juni', 'Juli', 'August', 'September',
'Oktober', 'November', 'Dezember']),
'an': lambda v: slh(v, ['chinero', 'frebero', 'marzo', 'abril',
'mayo', 'chunio', 'chulio', 'agosto',
'setiembre', 'otubre', 'nobiembre',
'abiento']),
'ang': lambda v: slh(v, ['Æfterra Gēola', 'Solmōnaþ', 'Hrēþmōnaþ',
'Ēastermōnaþ', 'Þrimilcemōnaþ', 'Sēremōnaþ',
'Mǣdmōnaþ', 'Wēodmōnaþ', 'Hāligmōnaþ',
'Winterfylleþ', 'Blōtmōnaþ', 'Gēolmōnaþ']),
'ar': lambda v: slh(v, ['يناير', 'فبراير', 'مارس', 'أبريل', 'مايو',
'يونيو', 'يوليو', 'أغسطس', 'سبتمبر',
'أكتوبر', 'نوفمبر', 'ديسمبر']),
'ast': lambda v: slh(v, ['xineru', 'febreru', 'marzu', 'abril',
'mayu', 'xunu', 'xunetu', 'agostu',
'setiembre', 'ochobre', 'payares',
'avientu']),
'be': lambda v: slh(v, ['студзень', 'люты', 'сакавік', 'красавік',
'травень', 'чэрвень', 'ліпень', 'жнівень',
'верасень', 'кастрычнік', 'лістапад',
'сьнежань']),
'bg': lambda v: slh(v, ['януари', 'февруари', 'март', 'април',
'май', 'юни', 'юли', 'август', 'септември',
'октомври', 'ноември', 'декември']),
'bn': lambda v: slh(v, ['জানুয়ারি', 'ফেব্রুয়ারি', 'মার্চ', 'এপ্রিল',
'মে', 'জুন', 'জুলাই', 'আগস্ট', 'সেপ্টেম্বর',
'অক্টোবর', 'নভেম্বর', 'ডিসেম্বর']),
'br': lambda v: slh(v, ['Genver', "C'hwevrer", 'Meurzh', 'Ebrel',
'Mae', 'Mezheven', 'Gouere', 'Eost',
'Gwengolo', 'Here', 'Du', 'Kerzu']),
'bs': lambda v: slh(v, ['januar', 'februar', 'mart', 'april',
'maj', 'juni', 'juli', 'august', 'septembar',
'oktobar', 'novembar', 'decembar']),
'ca': lambda v: slh(v, ['gener', 'febrer', 'març', 'abril', 'maig',
'juny', 'juliol', 'agost', 'setembre',
'octubre', 'novembre', 'desembre']),
'ceb': lambda v: slh(v, ['Enero', 'Pebrero', 'Marso', 'Abril',
'Mayo', 'Hunyo', 'Hulyo', 'Agosto',
'Septiyembre', 'Oktubre', 'Nobiyembre',
'Disyembre']),
'co': lambda v: slh(v, ['ghjennaghju', 'frivaghju', 'marzu',
'aprile', 'maghju', 'ghjugnu', 'lugliu',
'aostu', 'settembre', 'uttrovi', 'nuvembri',
'decembre']),
'cs': lambda v: slh(v, ['leden', 'únor', 'březen', 'duben',
'květen', 'červen', 'červenec', 'srpen',
'září', 'říjen', 'listopad', 'prosinec']),
'csb': lambda v: slh(v, ['stëcznik', 'gromicznik', 'strumiannik',
'łżëkwiôt', 'môj', 'czerwińc', 'lëpinc',
'zélnik', 'séwnik', 'rujan', 'lëstopadnik',
'gòdnik']),
'cv': lambda v: slh(v, ['кăрлач', 'нарăс', 'Пуш', 'Ака', 'çу',
'çĕртме', 'утă', 'çурла', 'авăн', 'юпа', 'чӳк',
'раштав']),
'cy': lambda v: slh(v, ['Ionawr', 'Chwefror', 'Mawrth', 'Ebrill',
'Mai', 'Mehefin', 'Gorffennaf', 'Awst', 'Medi',
'Hydref', 'Tachwedd', 'Rhagfyr']),
'da': lambda v: slh(v, ['januar', 'februar', 'marts', 'april', 'maj',
'juni', 'juli', 'august', 'september',
'oktober', 'november', 'december']),
'de': lambda v: slh(v, ['Januar', 'Februar', 'März', 'April',
'Mai', 'Juni', 'Juli', 'August',
'September', 'Oktober', 'November',
'Dezember']),
'el': lambda v: slh(v, ['Ιανουάριος', 'Φεβρουάριος', 'Μάρτιος',
'Απρίλιος', 'Μάιος', 'Ιούνιος', 'Ιούλιος',
'Αύγουστος', 'Σεπτέμβριος', 'Οκτώβριος',
'Νοέμβριος', 'Δεκέμβριος']),
'en': lambda v: slh(v, enMonthNames),
'eo': lambda v: slh(v, ['Januaro', 'Februaro', 'Marto', 'Aprilo',
'Majo', 'Junio', 'Julio', 'Aŭgusto',
'Septembro', 'Oktobro', 'Novembro',
'Decembro']),
'es': lambda v: slh(v, ['enero', 'febrero', 'marzo', 'abril', 'mayo',
'junio', 'julio', 'agosto', 'septiembre',
'octubre', 'noviembre', 'diciembre']),
'et': lambda v: slh(v, ['jaanuar', 'veebruar', 'märts', 'aprill',
'mai', 'juuni', 'juuli', 'august', 'september',
'oktoober', 'november', 'detsember']),
'eu': lambda v: slh(v, ['urtarrila', 'otsaila', 'martxoa', 'apirila',
'maiatza', 'ekaina', 'uztaila', 'abuztua',
'iraila', 'urria', 'azaroa', 'abendua']),
'fa': lambda v: slh(v, ['ژانویه', 'فوریه', 'مارس', 'آوریل', 'مه',
'ژوئن', 'ژوئیه', 'اوت', 'سپتامبر', 'اکتبر',
'نوامبر', 'دسامبر']),
'fi': lambda v: slh(v, ['tammikuu', 'helmikuu', 'maaliskuu',
'huhtikuu', 'toukokuu', 'kesäkuu',
'heinäkuu', 'elokuu', 'syyskuu', 'lokakuu',
'marraskuu', 'joulukuu']),
'fo': lambda v: slh(v, ['januar', 'februar', 'mars', 'apríl', 'mai',
'juni', 'juli', 'august', 'september',
'oktober', 'november', 'desember']),
'fr': lambda v: slh(v, ['janvier', 'février', 'mars (mois)',
'avril', 'mai', 'juin', 'juillet', 'août',
'septembre', 'octobre', 'novembre',
'décembre']),
'fur': lambda v: slh(v, ['Zenâr', 'Fevrâr', 'Març', 'Avrîl', 'Mai',
'Jugn', 'Lui', 'Avost', 'Setembar', 'Otubar',
'Novembar', 'Dicembar']),
'fy': lambda v: slh(v, ['jannewaris', 'febrewaris', 'maart', 'april',
'maaie', 'juny', 'july', 'augustus',
'septimber', 'oktober', 'novimber',
'desimber']),
'ga': lambda v: slh(v, ['Eanáir', 'Feabhra', 'Márta', 'Aibreán',
'Bealtaine', 'Meitheamh', 'Iúil', 'Lúnasa',
'Meán Fómhair', 'Deireadh Fómhair', 'Samhain',
'Nollaig']),
'gl': lambda v: slh(v, ['xaneiro', 'febreiro', 'marzo', 'abril',
'maio', 'xuño', 'xullo', 'agosto', 'setembro',
'outubro', 'novembro', 'decembro']),
'he': lambda v: slh(v, ['ינואר', 'פברואר', 'מרץ', 'אפריל', 'מאי',
'יוני', 'יולי', 'אוגוסט', 'ספטמבר', 'אוקטובר',
'נובמבר', 'דצמבר']),
'hi': lambda v: slh(v, ['जनवरी', 'फ़रवरी', 'मार्च', 'अप्रैल', 'मई',
'जून', 'जुलाई', 'अगस्त', 'सितम्बर', 'अक्टूबर',
'नवम्बर', 'दिसम्बर']),
'hr': lambda v: slh(v, ['siječanj', 'veljača', 'ožujak', 'travanj',
'svibanj', 'lipanj', 'srpanj', 'kolovoz',
'rujan', 'listopad', 'studeni', 'prosinac']),
'hu': lambda v: slh(v, ['január', 'február', 'március', 'április',
'május', 'június', 'július', 'augusztus',
'szeptember', 'október', 'november',
'december']),
'ia': lambda v: slh(v, ['januario', 'februario', 'martio', 'april',
'maio', 'junio', 'julio', 'augusto',
'septembre', 'octobre', 'novembre',
'decembre']),
'id': lambda v: slh(v, ['Januari', 'Februari', 'Maret', 'April',
'Mei', 'Juni', 'Juli', 'Agustus', 'September',
'Oktober', 'November', 'Desember']),
'ie': lambda v: slh(v, ['januar', 'februar', 'marte', 'april',
'may', 'junio', 'juli', 'august', 'septembre',
'octobre', 'novembre', 'decembre']),
'io': lambda v: slh(v, ['januaro', 'februaro', 'Marto', 'aprilo',
'mayo', 'junio', 'julio', 'agosto',
'septembro', 'oktobro', 'novembro',
'decembro']),
'is': lambda v: slh(v, ['janúar', 'febrúar', 'mars (mánuður)',
'apríl', 'maí', 'júní', 'júlí', 'ágúst',
'september', 'október', 'nóvember',
'desember']),
'it': lambda v: slh(v, ['gennaio', 'febbraio', 'marzo', 'aprile',
'maggio', 'giugno', 'luglio', 'agosto',
'settembre', 'ottobre', 'novembre',
'dicembre']),
'ja': lambda v: slh(v, makeMonthList('%d月')),
'jv': lambda v: slh(v, ['Januari', 'Februari', 'Maret', 'April', 'Mei',
'Juni', 'Juli', 'Agustus', 'September',
'Oktober', 'November', 'Desember']),
'ka': lambda v: slh(v, ['იანვარი', 'თებერვალი', 'მარტი', 'აპრილი',
'მაისი', 'ივნისი', 'ივლისი', 'აგვისტო',
'სექტემბერი', 'ოქტომბერი', 'ნოემბერი',
'დეკემბერი']),
'kn': lambda v: slh(v, ['ಜನವರಿ', 'ಫೆಬ್ರವರಿ', 'ಮಾರ್ಚಿ', 'ಎಪ್ರಿಲ್',
'ಮೇ', 'ಜೂನ', 'ಜುಲೈ', 'ಆಗಸ್ಟ್', 'ಸೆಪ್ಟೆಂಬರ್',
'ಅಕ್ಟೋಬರ್', 'ನವೆಂಬರ್', 'ಡಿಸೆಂಬರ್']),
'ko': lambda v: slh(v, makeMonthList('%d월')),
'ksh': lambda v: slh(v, ['Jannowaa', 'Febrowaa', 'Mä', 'Apprill',
'Meij', 'Juuni', 'Juuli', 'Aujuß',
'Sepptäber', 'Oktoober', 'Novemmber',
'Dezemmber']),
'ku': lambda v: slh(v, ['rêbendan', 'reşemî', 'adar', 'avrêl', 'gulan',
'pûşper', 'tîrmeh', 'gelawêj (meh)', 'rezber',
'kewçêr', 'sermawez', 'berfanbar']),
'kw': lambda v: slh(v, ['Mys Genver', 'Mys Whevrer', 'Mys Merth',
'Mys Ebrel', 'Mys Me', 'Mys Metheven',
'Mys Gortheren', 'Mys Est', 'Mys Gwyngala',
'Mys Hedra', 'Mys Du', 'Mys Kevardhu']),
'la': lambda v: slh(v, ['Ianuarius', 'Februarius', 'Martius',
'Aprilis', 'Maius', 'Iunius', 'Iulius',
'Augustus (mensis)', 'September', 'October',
'November', 'December']),
'lb': lambda v: slh(v, ['Januar', 'Februar', 'Mäerz', 'Abrëll', 'Mee',
'Juni', 'Juli', 'August', 'September',
'Oktober', 'November', 'Dezember']),
'li': lambda v: slh(v, ['jannewarie', 'fibberwarie', 'miert', 'april',
'mei', 'juni', 'juli', 'augustus (maond)',
'september', 'oktober', 'november',
'december']),
'lt': lambda v: slh(v, ['Sausis', 'Vasaris', 'Kovas', 'Balandis',
'Gegužė', 'Birželis', 'Liepa', 'Rugpjūtis',
'Rugsėjis', 'Spalis', 'Lapkritis', 'Gruodis']),
'lv': lambda v: slh(v, ['Janvāris', 'Februāris', 'Marts', 'Aprīlis',
'Maijs', 'Jūnijs', 'Jūlijs', 'Augusts',
'Septembris', 'Oktobris', 'Novembris',
'Decembris']),
'mhr': lambda v: slh(v, ['шорыкйол', 'пургыж', 'ӱярня', 'вӱдшор',
'ага', 'пеледыш', 'сӱрем', 'сорла', 'идым',
'шыжа', 'кылме', 'декабрь']),
'mi': lambda v: slh(v, ['Kohi-tātea', 'Hui-tanguru', 'Poutū-te-rangi',
'Paenga-whāwhā', 'Haratua', 'Pipiri',
'Hōngongoi', 'Here-turi-kōkā', 'Mahuru',
'Whiringa-ā-nuku', 'Whiringa-ā-rangi',
'Hakihea']),
'ml': lambda v: slh(v, ['ജനുവരി', 'ഫെബ്രുവരി', 'മാര്ച്', 'ഏപ്രില്',
'മേയ്', 'ജൂണ്', 'ജൂലൈ', 'ആഗസ്റ്റ്',
'സപ്തന്പര്', 'ഒക്ടോബര്', 'നവന്പര്',
'ഡിസന്പര്']),
'mr': lambda v: slh(v, ['जानेवारी', 'फेब्रुवारी', 'मार्च', 'एप्रिल',
'मे', 'जून', 'जुलै', 'ऑगस्ट', 'सप्टेंबर',
'ऑक्टोबर', 'नोव्हेंबर', 'डिसेंबर']),
'ms': lambda v: slh(v, ['Januari', 'Februari', 'Mac', 'April', 'Mei',
'Jun', 'Julai', 'Ogos', 'September', 'Oktober',
'November', 'Disember']),
'nap': lambda v: slh(v, ['Jennaro', 'Frevaro', 'Màrzo', 'Abbrile',
'Maggio', 'Giùgno', 'Luglio', 'Aùsto',
'Settembre', 'Ottovre', 'Nuvembre',
'Dicembre']),
'nds': lambda v: slh(v, ['Januar', 'Februar', 'März', 'April', 'Mai',
'Juni', 'Juli', 'August', 'September',
'Oktober', 'November', 'Dezember']),
'nl': lambda v: slh(v, ['januari', 'februari', 'maart', 'april', 'mei',
'juni', 'juli', 'augustus (maand)',
'september', 'oktober', 'november',
'december']),
'nn': lambda v: slh(v, ['januar', 'februar', 'månaden mars', 'april',
'mai', 'juni', 'juli', 'august', 'september',
'oktober', 'november', 'desember']),
'nb': lambda v: slh(v, ['januar', 'februar', 'mars', 'april', 'mai',
'juni', 'juli', 'august', 'september',
'oktober', 'november', 'desember']),
'oc': lambda v: slh(v, ['genièr', 'febrièr', 'març', 'abril',
'mai', 'junh', 'julhet', 'agost', 'setembre',
'octobre', 'novembre', 'decembre']),
'os': lambda v: slh(v, ['январь', 'февраль', 'мартъи', 'апрель', 'май',
'июнь', 'июль', 'август', 'сентябрь',
'октябрь', 'ноябрь', 'декабрь']),
'pdc': lambda v: slh(v, ['Yenner', 'Hanning', 'Matz', 'Abril', 'Moi',
'Yuni', 'Yuli', 'Aagscht', 'September',
'Oktower', 'Nowember', 'Disember']),
'pl': lambda v: slh(v, ['styczeń', 'luty', 'marzec', 'kwiecień', 'maj',
'czerwiec', 'lipiec', 'sierpień', 'wrzesień',
'październik', 'listopad', 'grudzień']),
'pt': lambda v: slh(v, ['Janeiro', 'Fevereiro', 'Março', 'Abril',
'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro',
'Outubro', 'Novembro', 'Dezembro']),
'ro': lambda v: slh(v, ['ianuarie', 'februarie', 'martie', 'aprilie',
'mai', 'iunie', 'iulie', 'august',
'septembrie', 'octombrie', 'noiembrie',
'decembrie']),
'ru': lambda v: slh(v, ['январь', 'февраль', 'март', 'апрель', 'май',
'июнь', 'июль', 'август', 'сентябрь',
'октябрь', 'ноябрь', 'декабрь']),
'sc': lambda v: slh(v, ['Ghennarzu', 'Frearzu', 'Martzu',
'Abrile', 'Maju', 'Làmpadas', 'Triulas',
'Aùstu', 'Cabudanni', 'Santugaìne',
'Santadria', 'Nadale']),
'scn': lambda v: slh(v, ['jinnaru', 'frivaru', 'marzu', 'aprili',
'maiu', 'giugnu', 'giugnettu', 'austu',
'sittèmmiru', 'uttùviru', 'nuvèmmiru',
'dicèmmiru']),
'sco': lambda v: slh(v, ['Januar', 'Februar', 'Mairch', 'Aprile',
'Mey', 'Juin', 'Julie', 'August', 'September',
'October', 'November', 'December']),
'se': lambda v: slh(v, ['ođđajagimánnu', 'guovvamánnu', 'njukčamánnu',
'cuoŋománnu', 'miessemánnu', 'geassemánnu',
'suoidnemánnu', 'borgemánnu', 'čakčamánnu',
'golggotmánnu', 'skábmamánnu', 'juovlamánnu']),
'sk': lambda v: slh(v, ['január', 'február', 'marec', 'apríl',
'máj', 'jún', 'júl', 'august', 'september',
'október', 'november', 'december']),
'sl': lambda v: slh(v, ['januar', 'februar', 'marec', 'april', 'maj',
'junij', 'julij', 'avgust', 'september',
'oktober', 'november', 'december']),
'sq': lambda v: slh(v, ['Janari', 'Shkurti', 'Marsi (muaj)', 'Prilli',
'Maji', 'Qershori', 'Korriku', 'Gushti',
'Shtatori', 'Tetori', 'Nëntori', 'Dhjetori']),
'sr': lambda v: slh(v, ['јануар', 'фебруар', 'март', 'април', 'мај',
'јун', 'јул', 'август', 'септембар', 'октобар',
'новембар', 'децембар']),
'su': lambda v: slh(v, ['Januari', 'Pébruari', 'Maret', 'April', 'Méi',
'Juni', 'Juli', 'Agustus', 'Séptémber',
'Oktober', 'Nopémber', 'Désémber']),
'sv': lambda v: slh(v, ['januari', 'februari', 'mars', 'april', 'maj',
'juni', 'juli', 'augusti', 'september',
'oktober', 'november', 'december']),
'ta': lambda v: slh(v, ['ஜனவரி', 'பிப்ரவரி', 'மார்ச்', 'ஏப்ரல்', 'மே',
'ஜூன்', 'ஜூலை', 'ஆகஸ்டு', 'செப்டம்பர்',
'அக்டோபர்', 'நவம்பர்', 'டிசம்பர்']),
'te': lambda v: slh(v, ['జనవరి', 'ఫిబ్రవరి', 'మార్చి', 'ఏప్రిల్',
'మే', 'జూన్', 'జూలై', 'ఆగష్టు', 'సెప్టెంబర్',
'అక్టోబర్', 'నవంబర్', 'డిసెంబర్']),
'th': lambda v: slh(v, ['มกราคม', 'กุมภาพันธ์', 'มีนาคม', 'เมษายน',
'พฤษภาคม', 'มิถุนายน', 'กรกฎาคม', 'สิงหาคม',
'กันยายน', 'ตุลาคม', 'พฤศจิกายน', 'ธันวาคม']),
'tl': lambda v: slh(v, ['Enero', 'Pebrero', 'Marso', 'Abril', 'Mayo',
'Hunyo', 'Hulyo', 'Agosto', 'Setyembre',
'Oktubre', 'Nobyembre', 'Disyembre']),
'tpi': lambda v: slh(v, ['Janueri', 'Februeri', 'Mas', 'Epril', 'Me',
'Jun', 'Julai', 'Ogas', 'Septemba', 'Oktoba',
'Novemba', 'Disemba']),
'tr': lambda v: slh(v, ['Ocak', 'Şubat', 'Mart', 'Nisan', 'Mayıs',
'Haziran', 'Temmuz', 'Ağustos', 'Eylül',
'Ekim', 'Kasım', 'Aralık']),
'tt': lambda v: slh(v, ['Ğínwar', 'Febräl', 'Mart', 'Äpril', 'May',
'Yün', 'Yül', 'August', 'Sentäber', 'Öktäber',
'Nöyäber', 'Dekäber']),
'uk': lambda v: slh(v, ['січень', 'лютий', 'березень', 'квітень',
'травень', 'червень', 'липень', 'серпень',
'вересень', 'жовтень', 'листопад', 'грудень']),
'ur': lambda v: slh(v, ['جنوری', 'فروری', 'مارچ',
'اپريل', 'مئی', 'جون', 'جولائی', 'اگست',
'ستمبر', 'اکتوبر', 'نومبر', 'دسمبر']),
'vec': lambda v: slh(v, ['genaro', 'febraro', 'marzso', 'apriłe',
'majo', 'giugno', 'lujo', 'agosto',
'setenbre', 'otobre', 'novenbre',
'diçenbre']),
'vi': lambda v: slh(v, ['tháng một', 'tháng hai', 'tháng ba',
'tháng tư', 'tháng năm', 'tháng sáu',
'tháng bảy', 'tháng tám', 'tháng chín',
'tháng mười', 'tháng mười một', 'tháng 12']),
'vo': lambda v: slh(v, ['Yanul', 'Febul', 'Mäzul', 'Prilul', 'Mayul',
'Yunul', 'Yulul', 'Gustul', 'Setul', 'Tobul',
'Novul', 'Dekul']),
'wa': lambda v: slh(v, ['djanvî', 'fevrî', 'Måss (moes)', 'avri',
'may', 'djun', 'djulete', 'awousse', 'setimbe',
'octôbe', 'nôvimbe', 'decimbe']),
'zh': lambda v: slh(v, makeMonthList('%d月')),
'nan': lambda v: slh(v, ['It-goe̍h', 'Jī-goe̍h', 'Saⁿ-goe̍h',
'Sì-goe̍h', 'Gō·-goe̍h', 'La̍k-goe̍h',
'Chhit-goe̍h', 'Peh-goe̍h', 'Káu-goe̍h',
'Cha̍p-goe̍h', 'Cha̍p-it-goe̍h',
'Cha̍p-jī-goe̍h']),
},
'Number': {
'ar': lambda v: dh_number(v, '%d (عدد)'),
'be': lambda v: dh_number(v, '%d (лік)'),
'bg': lambda v: dh_number(v, '%d (число)'),
'bs': lambda v: dh_number(v, '%d (broj)'),
'cs': lambda v: dh_number(v, '%d (číslo)'),
'da': lambda v: dh_number(v, '%d (tal)'),
'en': lambda v: dh_number(v, '%d (number)'),
'fa': lambda v: dh_number(v, '%d (عدد)'),
'fi': lambda v: dh_number(v, '%d (luku)'),
'fr': lambda v: dh_number(v, '%d (nombre)'),
'he': lambda v: dh_number(v, '%d (מספר)'),
'hu': lambda v: dh_number(v, '%d (szám)'),
'ia': lambda v: dh_number(v, '%d (numero)'),
'ja': lambda v: dh_number(v, '%d'),
'ko': lambda v: dh_number(v, '%d'),
'ksh': lambda v: dh_number(v, '%d (Zahl)'),
'la': lambda v: dh_number(v, '%d'),
'lt': lambda v: dh_number(v, '%d (skaičius)'),
'nds': lambda v: dh_number(v, '%d (Tall)'),
'nl': lambda v: dh_number(v, '%d (getal)'),
'nn': lambda v: dh_number(v, 'Talet %d'),
'nb': lambda v: dh_number(v, '%d (tall)'),
'nso': lambda v: dh_number(v, '%d (nomoro)'),
'pl': lambda v: dh_number(v, '%d (liczba)'),
'ro': lambda v: dh_number(v, '%d (cifră)'),
'ru': lambda v: dh_number(v, '%d (число)'),
'sk': lambda v: dh_number(v, '%d (číslo)'),
'sl': lambda v: dh_number(v, '%d (število)'),
'sr': lambda v: dh_number(v, '%d (број)'),
'sv': lambda v: dh_number(v, '%d (tal)'),
'th': lambda v: dh_number(v, '%d'), # was %d (จำนวน)
'tl': lambda v: dh_number(v, '%d (bilang)'),
'tr': lambda v: dh_number(v, '%d (sayı)'),
'zh': lambda v: dh_number(v, '%d'),
},
'YearAD': {
'af': dh_simpleYearAD,
'gsw': dh_simpleYearAD,
'an': dh_simpleYearAD,
'ang': dh_simpleYearAD,
'ar': dh_simpleYearAD,
'ast': dh_simpleYearAD,
'az': dh_simpleYearAD,
'be': dh_simpleYearAD,
'bg': dh_simpleYearAD,
'bn': lambda v: dh_yearAD(v, '%B'),
'br': dh_simpleYearAD,
'bs': dh_simpleYearAD,
'ca': dh_simpleYearAD,
'ceb': dh_simpleYearAD,
'cs': dh_simpleYearAD,
'csb': dh_simpleYearAD,
'cv': dh_simpleYearAD,
'cy': dh_simpleYearAD,
'da': dh_simpleYearAD,
'de': dh_simpleYearAD,
'el': dh_simpleYearAD,
'en': dh_simpleYearAD,
'eo': dh_simpleYearAD,
'es': dh_simpleYearAD,
'et': dh_simpleYearAD,
'eu': dh_simpleYearAD,
'fa': lambda v: dh_yearAD(v, '%F (میلادی)'),
'fi': dh_simpleYearAD,
'fo': dh_simpleYearAD,
'fr': dh_simpleYearAD,
'fur': dh_simpleYearAD,
'fy': dh_simpleYearAD,
'ga': dh_simpleYearAD,
'gan': lambda v: dh_yearAD(v, '%d年'),
'gd': dh_simpleYearAD,
'gl': dh_simpleYearAD,
'gu': lambda v: dh_yearAD(v, '%G'),
'he': dh_simpleYearAD,
'hi': lambda v: dh_yearAD(v, '%H'),
'hr': lambda v: dh_yearAD(v, '%d.'),
'hu': dh_simpleYearAD,
'hy': dh_simpleYearAD,
'ia': dh_simpleYearAD,
'id': dh_simpleYearAD,
'ie': dh_simpleYearAD,
'ilo': dh_simpleYearAD,
'io': dh_simpleYearAD,
'is': dh_simpleYearAD,
'it': dh_simpleYearAD,
'ja': lambda v: dh_yearAD(v, '%d年'),
'jbo': lambda v: dh_yearAD(v, '%dmoi nanca'),
'ka': dh_simpleYearAD,
'kn': lambda v: dh_yearAD(v, '%K'),
'ko': lambda v: dh_yearAD(v, '%d년'),
'ksh': lambda v: dh_yearAD(v, 'Joohr %d'),
'ku': dh_simpleYearAD,
'kw': dh_simpleYearAD,
'la': dh_simpleYearAD,
'lb': dh_simpleYearAD,
'li': dh_simpleYearAD,
'lt': dh_simpleYearAD,
'lv': dh_simpleYearAD,
'mi': dh_simpleYearAD,
'mhr': dh_simpleYearAD,
'mk': dh_simpleYearAD,
'ml': dh_simpleYearAD,
'mo': dh_simpleYearAD,
'mr': lambda v: dh_yearAD(v, 'ई.स. %H'),
'ms': dh_simpleYearAD,
'na': dh_simpleYearAD,
'nap': dh_simpleYearAD,
'nds': dh_simpleYearAD,
'nl': dh_simpleYearAD,
'nn': dh_simpleYearAD,
'nb': dh_simpleYearAD,
'nso': dh_simpleYearAD,
'oc': dh_simpleYearAD,
'os': dh_simpleYearAD,
'pdc': dh_simpleYearAD,
'pl': dh_simpleYearAD,
'pt': dh_simpleYearAD,
'rm': dh_simpleYearAD,
'ro': dh_simpleYearAD,
'rup': dh_simpleYearAD,
'ru': lambda v: dh_yearAD(v, '%d год'),
'sco': dh_simpleYearAD,
'scn': dh_simpleYearAD,
'se': dh_simpleYearAD,
'sh': dh_simpleYearAD,
'sk': dh_simpleYearAD,
'sl': dh_simpleYearAD,
'sm': dh_simpleYearAD,
'sq': dh_simpleYearAD,
'sr': dh_simpleYearAD,
'sv': dh_simpleYearAD,
'su': dh_simpleYearAD,
'ta': dh_simpleYearAD,
'te': dh_simpleYearAD,
# 2005 => 'พ.ศ. 2548'
'th': lambda v: dh_yearAD(v, 'พ.ศ. %T'),
'tl': dh_simpleYearAD,
'tpi': dh_simpleYearAD,
'tr': dh_simpleYearAD,
'tt': dh_simpleYearAD,
'uk': dh_simpleYearAD,
'ur': lambda v: dh_yearAD(v, '%dء'),
'uz': dh_simpleYearAD,
'vec': dh_simpleYearAD,
'vi': dh_simpleYearAD,
'vo': dh_simpleYearAD,
'wa': dh_simpleYearAD,
'zh': lambda v: dh_yearAD(v, '%d年'),
'nan': lambda v: dh_yearAD(v, '%d nî'),
},
'YearBC': {
'af': lambda v: dh_yearBC(v, '%d v.C.'),
'ast': lambda v: dh_yearBC(v, '%d edC'),
'be': lambda v: dh_yearBC(v, '%d да н.э.'),
'bg': lambda v: dh_yearBC(v, '%d г. пр.н.е.'),
'bs': lambda v: dh_yearBC(v, '%d p.n.e.'),
'ca': lambda v: dh_yearBC(v, '%d aC'),
'cs': lambda v: dh_yearBC(v, '%d př. n. l.'),
'cy': lambda v: dh_yearBC(v, '%d CC'),
'da': lambda v: dh_yearBC(v, '%d f.Kr.'),
'de': lambda v: dh_yearBC(v, '%d v. Chr.'),
'el': lambda v: dh_yearBC(v, '%d π.Χ.'),
'en': lambda v: dh_yearBC(v, '%d BC'),
'eo': lambda v: dh_yearBC(v, '-%d'),
'es': lambda v: dh_yearBC(v, '%d a. C.'),
'et': lambda v: dh_yearBC(v, '%d eKr'),
'eu': lambda v: dh_yearBC(v, 'K. a. %d'),
'fa': lambda v: dh_yearBC(v, '%d (پیش از میلاد)'),
'fi': lambda v: dh_yearBC(v, '%d eaa.'),
'fo': lambda v: dh_yearBC(v, '%d f. Kr.'),
'fr': lambda v: dh_yearBC(v, '%d av. J.-C.'),
'gl': lambda v: dh_yearBC(v, '-%d'),
'he': lambda v: dh_yearBC(v, '%d לפני הספירה'),
'hr': lambda v: dh_yearBC(v, '%d. pr. Kr.'),
'hu': lambda v: dh_yearBC(v, 'I. e. %d'),
'id': lambda v: dh_yearBC(v, '%d SM'),
'io': lambda v: dh_yearBC(v, '%d aK'),
'is': lambda v: dh_yearBC(v, '%d f. Kr.'),
'it': lambda v: dh_yearBC(v, '%d a.C.'),
'ka': lambda v: dh_yearBC(v, 'ძვ. წ. %d'),
'ko': lambda v: dh_yearBC(v, '기원전 %d년'),
'ksh': lambda v: dh_yearBC(v, 'Joohr %d füür Krėßtůß'),
'la': lambda v: dh_yearBC(v, '%d a.C.n.'),
'lb': lambda v: dh_yearBC(v, '-%d'),
'lt': lambda v: dh_yearBC(v, '%d m. pr. m. e.'),
'lv': lambda v: dh_yearBC(v, '%d p.m.ē.'),
'mk': lambda v: dh_yearBC(v, '%d п.н.е.'),
'ms': lambda v: dh_yearBC(v, '%d SM'),
'nap': lambda v: dh_yearBC(v, '%d AC'),
'nds': lambda v: dh_yearBC(v, '%d v. Chr.'),
'nl': lambda v: dh_yearBC(v, '%d v.Chr.'),
'nn': lambda v: dh_yearBC(v, '-%d'),
'nb': lambda v: dh_yearBC(v, '%d f.Kr.'),
'oc': lambda v: dh_yearBC(v, '-%d'),
'pl': lambda v: dh_yearBC(v, '%d p.n.e.'),
'pt': lambda v: dh_yearBC(v, '%d a.C.'),
'ro': lambda v: dh_yearBC(v, '%d î.Hr.'),
'ru': lambda v: dh_yearBC(v, '%d год до н. э.'),
'scn': lambda v: dh_yearBC(v, '%d a.C.'),
'sk': lambda v: dh_yearBC(v, '%d pred Kr.'),
'sl': lambda v: dh_yearBC(v, '%d pr. n. št.'),
'sq': lambda v: dh_yearBC(v, '%d p.e.s.'),
'sr': lambda v: dh_yearBC(v, '%d. п. н. е.'),
'sv': lambda v: dh_yearBC(v, '%d f.Kr.'),
'sw': lambda v: dh_yearBC(v, '%d KK'),
'ta': lambda v: dh_yearBC(v, 'கி.மு %d'),
'tr': lambda v: dh_yearBC(v, 'M.Ö. %d'),
'tt': lambda v: dh_yearBC(v, 'MA %d'),
'uk': lambda v: dh_yearBC(v, '%d до н. е.'),
'ur': lambda v: dh_yearBC(v, '%d ق م'),
'uz': lambda v: dh_yearBC(v, 'Mil. av. %d'),
'vec': lambda v: dh_yearBC(v, '%d a.C.'),
'vo': lambda v: dh_yearBC(v, '%d b.K.'),
'zh': lambda v: dh_yearBC(v, '前%d年'),
},
'DecadeAD': {
'gsw': lambda v: dh_decAD(v, '%der'),
'ar': lambda v: dh_decAD(v, '%d عقد'),
'ang': lambda v: dh_decAD(v, '%de'),
'ast': lambda v: dh_decAD(v, 'Años %d'),
'bg': lambda v: dh_decAD(v, '%d-те'),
'br': lambda v: dh_decAD(v, 'Bloavezhioù %d'),
'bs': lambda v: dh_decAD(v, '%dte'),
# Unknown what the pattern is, but 1970 is different
'ca': lambda m: multi(m, [
(lambda v: dh_decAD(v, 'Dècada de %d'), lambda p: p == 1970),
(lambda v: dh_decAD(v, 'Dècada del %d'), alwaysTrue)]),
# 1970s => '1970-1979'
'cs': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '1-9'), lambda p: p == 1),
(lambda v: dh(v, '%d-%d',
lambda i: (encDec0(i), encDec0(i) + 9), decSinglVal),
alwaysTrue)]),
'cy': lambda v: dh_decAD(v, '%dau'),
'da': lambda v: dh_decAD(v, "%d'erne"),
'de': lambda v: dh_decAD(v, '%der'),
'el': lambda v: dh_decAD(v, 'Δεκαετία %d'),
'en': lambda v: dh_decAD(v, '%ds'),
'eo': lambda v: dh_decAD(v, '%d-aj jaroj'),
'es': lambda v: dh_decAD(v, 'Años %d'),
'et': lambda v: dh_decAD(v, '%d. aastad'),
'fa': lambda v: dh_decAD(v, 'دهه %d (میلادی)'),
# decades ending in 00 are spelled differently
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, 'Ensimmäinen vuosikymmen'),
lambda p: p == 0),
(lambda v: dh_decAD(v, '%d-luku'), lambda p: (p % 100 != 0)),
(lambda v: dh_decAD(v, '%d-vuosikymmen'), alwaysTrue)]),
'fo': lambda v: dh_decAD(v, '%d-árini'),
'fr': lambda v: dh_decAD(v, 'Années %d'),
'ga': lambda v: dh_decAD(v, '%didí'),
'gan': lambda v: dh_decAD(v, '%d年代'),
'he': lambda m: multi(m, [
(lambda v: dh(v, 'שנות ה־%d',
lambda i: encDec0(i) % 100,
lambda ii: 1900 + ii[0]),
lambda p: p >= 1900 and p < 2000),
# This is a dummy value, just to avoid validation testing.
(lambda v: dh_decAD(v, '%dth decade'),
alwaysTrue)]), # ********** ERROR!!!
'hi': lambda v: dh_decAD(v, '%H का दशक'),
# 1970s => 1970-1979
'hr': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '1-9'), lambda p: p == 1),
(lambda v: dh(v, '%d-%d',
lambda i: (encDec0(i), encDec0(i) + 9),
lambda ii: ii[0]), alwaysTrue)]),
'hu': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '0-s évek'), lambda p: p == 1),
(lambda v: dh_decAD(v, '%d-as évek'),
lambda p: (p % 100 // 10) in (0, 2, 3, 6, 8)),
(lambda v: dh_decAD(v, '%d-es évek'), alwaysTrue)]),
'io': lambda v: dh_decAD(v, '%da yari'),
# 1970s => '1971–1980'
'is': lambda v: dh(v, '%d–%d',
lambda i: (encDec1(i), encDec1(i) + 9),
lambda ii: ii[0] - 1),
'it': lambda v: dh_decAD(v, 'Anni %d'),
'ja': lambda v: dh_decAD(v, '%d年代'),
'ka': lambda v: dh_decAD(v, '%d-ები'),
'ko': lambda v: dh_decAD(v, '%d년대'),
'ksh': lambda v: dh_decAD(v, '%d-er Joohre'),
# 1970s => 'Decennium 198' (1971-1980)
'la': lambda v: dh(v, 'Decennium %d',
lambda i: encDec1(i) // 10 + 1,
lambda ii: (ii[0] - 1) * 10),
# 1970s => 'XX amžiaus 8-as dešimtmetis' (1971-1980)
'lt': lambda v: dh(v, '%R amžiaus %d-as dešimtmetis',
lambda i: (encDec1(i) // 100 + 1,
encDec1(i) % 100 // 10 + 1),
lambda v: (v[0] - 1) * 100 + (v[1] - 1) * 10),
# 1970s => 'Ngahurutanga 198' (1971-1980)
'mi': lambda v: dh(v, 'Ngahurutanga %d',
lambda i: encDec0(i) // 10 + 1,
lambda ii: (ii[0] - 1) * 10),
'mhr': lambda v: dh_decAD(v, '%d ийла'),
# 1970s => '1970-1979'
'nl': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '1-9'), lambda p: p == 1),
(lambda v: dh(v, '%d-%d',
lambda i: (encDec0(i), encDec0(i) + 9), decSinglVal),
alwaysTrue)]),
'nn': lambda v: dh_decAD(v, '%d0-åra'), # FIXME: not sure of this one
'nb': lambda v: dh_decAD(v, '%d-årene'),
'os': lambda v: dh_decAD(v, '%d-тæ'),
# 1970s => 'Lata 70. XX wieku' for anything
# except 1900-1919, 2000-2019,
        # etc., in which case it's 'Lata 1900-1909'
'pl': lambda m: multi(m, [
(lambda v: dh(v, 'Lata %d-%d',
lambda i: (encDec0(i), encDec0(i) + 9), decSinglVal),
lambda p: p % 100 >= 0 and p % 100 < 20),
(lambda v: dh(v, 'Lata %d. %R wieku',
lambda i: (encDec0(i) % 100, encDec0(i) // 100 + 1),
lambda ii: (ii[1] - 1) * 100 + ii[0]),
alwaysTrue)]),
'pt': lambda v: dh_decAD(v, 'Década de %d'),
'ro': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, 'Primul deceniu d.Hr.'),
lambda p: p == 0),
(lambda v: dh_decAD(v, 'Anii %d'), alwaysTrue)]),
'ru': lambda v: dh_decAD(v, '%d-е'),
'scn': lambda v: dh_decAD(v, '%dini'),
# 1970 => '70. roky 20. storočia'
'sk': lambda v: dh(v, '%d. roky %d. storočia',
lambda i: (encDec0(i) % 100, encDec0(i) // 100 + 1),
lambda ii: (ii[1] - 1) * 100 + ii[0]),
'sl': lambda v: dh_decAD(v, '%d.'),
'sq': lambda v: dh_decAD(v, 'Vitet %d'),
'sr': lambda v: dh_decAD(v, '%dе'),
'sv': lambda m: multi(m, [
(lambda v: dh_decAD(v, '%d-talet (decennium)'),
lambda p: (p % 100 == 0)),
(lambda v: dh_decAD(v, '%d-talet'), alwaysTrue)]),
'tt': lambda v: dh_decAD(v, '%d. yıllar'),
'uk': lambda m: multi(m, [
(lambda v: dh_decAD(v, '%d-ві'),
lambda p: p == 0 or (p % 100 == 40)),
(lambda v: dh_decAD(v, '%d-ні'), lambda p: p % 1000 == 0),
(lambda v: dh_decAD(v, '%d-ті'), alwaysTrue)]),
'ur': lambda v: dh_decAD(v, '%d کی دہائی'),
'wa': lambda v: dh_decAD(v, 'Anêyes %d'),
'zh': lambda v: dh_decAD(v, '%d年代'),
'nan': lambda v: dh_decAD(v, '%d nî-tāi'),
},
'DecadeBC': {
'de': lambda v: dh_decBC(v, '%der v. Chr.'),
'da': lambda v: dh_decBC(v, "%d'erne f.Kr."),
'en': lambda v: dh_decBC(v, '%ds BC'),
'es': lambda v: dh_decBC(v, 'Años %d adC'),
'et': lambda v: dh_decBC(v, '%d. aastad eKr'),
'eu': lambda v: dh_decBC(v, 'K. a. %dko hamarkada'),
# decades ending in 00 are spelled differently
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, 'Ensimmäinen vuosikymmen eaa.'),
lambda p: p == 0),
(lambda v: dh_decBC(v, '%d-luku eaa.'), lambda p: (p % 100 != 0)),
(lambda v: dh_decBC(v, '%d-vuosikymmen eaa.'), alwaysTrue)]),
'fr': lambda v: dh_decBC(v, 'Années -%d'),
'he': lambda v: dh_decBC(v, 'שנות ה־%d לפני הספירה'),
'hr': lambda v: dh_decBC(v, '%dih p.n.e.'),
'hu': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, 'i. e. 0-s évek'),
lambda p: p == 0),
(lambda v: dh_decBC(v, 'i. e. %d-as évek'),
lambda p: (p % 100 // 10) in (0, 2, 3, 6, 8)),
(lambda v: dh_decBC(v, 'i. e. %d-es évek'), alwaysTrue)]),
'it': lambda v: dh_decBC(v, 'Anni %d a.C.'),
'ka': lambda v: dh_decBC(v, 'ძვ. წ. %d-ები'),
'ksh': lambda v: dh_decBC(v, '%d-er Joohre füür Krėßtůß'),
# uncertain if ksh is right. might go to redirect.
# '19-10 v. Chr.'
'nl': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '9-1 v.Chr.'), lambda p: p == 1),
(lambda v: dh(v, '%d-%d v.Chr.',
lambda i: (encDec0(i) + 9, encDec0(i)),
lambda ii: ii[1]), alwaysTrue)]),
'pt': lambda v: dh_decBC(v, 'Década de %d a.C.'),
'ro': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, 'Primul deceniu î.Hr.'),
lambda p: p == 0),
(lambda v: dh_decBC(v, 'Anii %d î.Hr.'), alwaysTrue)]),
'ru': lambda v: dh_decBC(v, '%d-е до н. э.'),
'sl': lambda v: dh_decBC(v, '%d. pr. n. št.'),
'sv': lambda m: multi(m, [
(lambda v: dh_decBC(v, '%d-talet f.Kr. (decennium)'),
lambda p: (p % 100 == 0)),
(lambda v: dh_decBC(v, '%d-talet f.Kr.'), alwaysTrue)]),
'tt': lambda v: dh_decBC(v, 'MA %d. yıllar'),
'uk': lambda m: multi(m, [
(lambda v: dh_decBC(v, '%d-ві до Р.Х.'),
lambda p: p == 0 or (p % 100 == 40)),
(lambda v: dh_decBC(v, '%d-ті до Р.Х.'), alwaysTrue)]),
'ur': lambda v: dh_decBC(v, '%d کی دہائی ق م'),
'zh': lambda v: dh_decBC(v, '前%d年代'),
},
'CenturyAD': {
'af': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%dste eeu'),
lambda p: p in (1, 8) or (p >= 20)),
(lambda v: dh_centuryAD(v, '%dde eeu'), alwaysTrue)]),
'gsw': lambda v: dh_centuryAD(v, '%d. Jahrhundert'),
'ang': lambda v: dh_centuryAD(v, '%de gēarhundred'),
'ar': lambda v: dh_centuryAD(v, 'قرن %d'),
'ast': lambda v: dh_centuryAD(v, 'Sieglu %R'),
'be': lambda v: dh_centuryAD(v, '%d стагодзьдзе'),
'bg': lambda v: dh_centuryAD(v, '%d век'),
'br': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Iañ kantved'), lambda p: p == 1),
(lambda v: dh_constVal(v, 2, 'Eil kantved'), lambda p: p == 2),
(lambda v: dh_centuryAD(v, '%Re kantved'), lambda p: p in (2, 3)),
(lambda v: dh_centuryAD(v, '%Rvet kantved'), alwaysTrue)]),
'bs': lambda v: dh_centuryAD(v, '%d. vijek'),
'ca': lambda v: dh_centuryAD(v, 'Segle %R'),
'cs': lambda v: dh_centuryAD(v, '%d. století'),
'cv': lambda v: dh_centuryAD(v, '%R ĕмĕр'),
'cy': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%deg ganrif'),
lambda p: p in (17, 19)),
(lambda v: dh_centuryAD(v, '%dain ganrif'), lambda p: p == 21),
(lambda v: dh_centuryAD(v, '%dfed ganrif'), alwaysTrue)]),
'da': lambda v: dh_centuryAD(v, '%d00-tallet'),
'de': lambda v: dh_centuryAD(v, '%d. Jahrhundert'),
'el': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%dός αιώνας'), lambda p: p == 20),
(lambda v: dh_centuryAD(v, '%dος αιώνας'), alwaysTrue)]),
'en': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%dst century'),
lambda p: p == 1 or (p > 20 and p % 10 == 1)),
(lambda v: dh_centuryAD(v, '%dnd century'),
lambda p: p == 2 or (p > 20 and p % 10 == 2)),
(lambda v: dh_centuryAD(v, '%drd century'),
lambda p: p == 3 or (p > 20 and p % 10 == 3)),
(lambda v: dh_centuryAD(v, '%dth century'), alwaysTrue)]),
'eo': lambda v: dh_centuryAD(v, '%d-a jarcento'),
'es': lambda v: dh_centuryAD(v, 'Siglo %R'),
'et': lambda v: dh_centuryAD(v, '%d. sajand'),
'eu': lambda v: dh_centuryAD(v, '%R. mendea'), # %R. mende
'fa': lambda m: multi(m, [
(lambda v: dh_constVal(v, 20, 'سده ۲۰ (میلادی)'),
lambda p: p == 20),
# This is a dummy value, just to avoid validation testing.
            # Later, it should be replaced with a proper 'fa' title
(lambda v: dh_centuryAD(v, 'سده %d (میلادی)'),
alwaysTrue)]), # ********** ERROR!!!
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Ensimmäinen vuosisata'),
lambda p: p == 1),
(lambda v: dh(v, '%d00-luku',
lambda i: i - 1,
lambda ii: ii[0] + 1), alwaysTrue)]),
'fo': lambda v: dh_centuryAD(v, '%d. øld'),
'fr': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%Rer siècle'), lambda p: p == 1),
(lambda v: dh_centuryAD(v, '%Re siècle'), alwaysTrue)]),
'fy': lambda v: dh_centuryAD(v, '%de ieu'),
'ga': lambda v: dh_centuryAD(v, '%dú haois'),
'gl': lambda v: dh_centuryAD(v, 'Século %R'),
'he': lambda v: dh_centuryAD(v, 'המאה ה־%d'),
'hi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 20, 'बीसवी शताब्दी'), lambda p: p == 20),
# This is a dummy value, just to avoid validation testing.
            # Later, it should be replaced with a proper 'hi' title
(lambda v: dh_centuryAD(v, '%dth century'),
alwaysTrue)]), # ********** ERROR!!!
'hr': lambda v: dh_centuryAD(v, '%d. stoljeće'),
'hu': lambda v: dh_centuryAD(v, '%d. század'),
'id': lambda v: dh_centuryAD(v, 'Abad ke-%d'),
'io': lambda v: dh_centuryAD(v, '%dma yar-cento'),
'it': lambda v: dh_centuryAD(v, '%R secolo'),
'is': lambda v: dh_centuryAD(v, '%d. öldin'),
'ja': lambda v: dh_centuryAD(v, '%d世紀'),
'jv': lambda v: dh_centuryAD(v, 'Abad kaping %d'),
'ka': lambda v: dh_centuryAD(v, '%R საუკუნე'),
'ko': lambda v: dh_centuryAD(v, '%d세기'),
'ku': lambda v: dh_centuryAD(v, "Sedsala %d'an"),
'kw': lambda m: multi(m, [
(lambda v: dh_centuryAD(v, '%dsa kansblydhen'), lambda p: p <= 3),
(lambda v: dh_centuryAD(v, '%da kansblydhen'), lambda p: p == 4),
(lambda v: dh_centuryAD(v, '%des kansblydhen'), lambda p: p == 5),
(lambda v: dh_centuryAD(v, '%dns kansblydhen'), lambda p: p >= 20),
(lambda v: dh_centuryAD(v, '%dves kansblydhen'), alwaysTrue)]),
'ksh': lambda v: dh_centuryAD(v, '%d. Joohunndot'),
'la': lambda v: dh_centuryAD(v, 'Saeculum %d'),
'lb': lambda v: dh_centuryAD(v, '%d. Joerhonnert'),
# Limburgish (li) have individual names for each century
'li': lambda v: slh(v, ['Ierste iew', 'Twiede iew', 'Derde iew',
'Veerde iew', 'Viefde iew', 'Zesde iew',
'Zevende iew', 'Achste iew',
'Negende iew', 'Tiende iew',
'Elfde iew', 'Twelfde iew',
'Dertiende iew', 'Veertiende iew',
'Vieftiende iew', 'Zestiende iew',
'Zeventiende iew', 'Achtiende iew',
'Negentiende iew', 'Twintegste iew',
'Einentwintegste iew',
'Twieëntwintegste iew']),
'lt': lambda v: dh_centuryAD(v, '%R amžius'),
'lv': lambda v: dh_centuryAD(v, '%d. gadsimts'),
'mi': lambda v: dh_centuryAD(v, 'Tua %d rau tau'),
'mk': lambda v: dh_centuryAD(v, '%d век'),
'nds': lambda v: dh_centuryAD(v, '%d. Johrhunnert'),
'nl': lambda v: dh_centuryAD(v, '%de eeuw'),
'nn': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '1. århundret'), lambda p: p == 1),
(lambda v: dh(v, '%d00-talet', lambda i: i - 1,
lambda ii: ii[0] + 1), alwaysTrue)]),
'nb': lambda v: dh_centuryAD(v, '%d. århundre'),
'os': lambda v: dh_centuryAD(v, '%R æнус'),
'pl': lambda v: dh_centuryAD(v, '%R wiek'),
'pt': lambda v: dh_centuryAD(v, 'Século %R'),
'ro': lambda v: dh_centuryAD(v, 'Secolul %R'),
'ru': lambda v: dh_centuryAD(v, '%R век'),
'scn': lambda v: dh_centuryAD(v, 'Sèculu %R'),
'sk': lambda v: dh_centuryAD(v, '%d. storočie'),
'sl': lambda v: dh_centuryAD(v, '%d. stoletje'),
'sr': lambda v: dh_centuryAD(v, '%d. век'),
'sq': lambda v: dh_centuryAD(v, 'Shekulli %R'),
'sv': lambda v: dh(v, '%d00-talet',
lambda i: i - 1, lambda ii: ii[0] + 1),
'su': lambda v: dh_centuryAD(v, 'Abad ka-%d'),
'th': lambda v: dh_centuryAD(v, 'คริสต์ศตวรรษที่ %d'),
'tr': lambda v: dh_centuryAD(v, '%d. yüzyıl'),
'tt': lambda v: dh_centuryAD(v, '%d. yöz'),
'uk': lambda v: dh_centuryAD(v, '%d століття'),
'ur': lambda v: dh_centuryAD(v, '%d ویں صدی'),
'vi': lambda v: dh_centuryAD(v, 'Thế kỷ %d'),
'wa': lambda v: dh_centuryAD(v, '%dinme sieke'),
'zh': lambda v: dh_centuryAD(v, '%d世纪'),
'nan': lambda v: dh_centuryAD(v, '%d sè-kí'),
},
'CenturyBC': {
'af': lambda m: multi(m, [
(lambda v: dh_centuryBC(v, '%dste eeu v.C.'),
lambda p: p in (1, 8) or (p >= 20)),
(lambda v: dh_centuryBC(v, '%dde eeu v.C.'), alwaysTrue)]),
'bg': lambda v: dh_centuryBC(v, '%d век пр.н.е.'),
'br': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Iañ kantved kt JK'),
lambda p: p == 1),
(lambda v: dh_constVal(v, 2, 'Eil kantved kt JK'),
lambda p: p == 2),
(lambda v: dh_centuryBC(v, '%Re kantved kt JK'),
lambda p: p in (2, 3)),
(lambda v: dh_centuryBC(v, '%Rvet kantved kt JK'), alwaysTrue)]),
'ca': lambda v: dh_centuryBC(v, 'Segle %R aC'),
'cs': lambda v: dh_centuryBC(v, '%d. století př. n. l.'),
'da': lambda v: dh_centuryBC(v, '%d. århundrede f.Kr.'),
'de': lambda v: dh_centuryBC(v, '%d. Jahrhundert v. Chr.'),
'el': lambda v: dh_centuryBC(v, '%dος αιώνας π.Χ.'),
'en': lambda m: multi(m, [
(lambda v: dh_centuryBC(v, '%dst century BC'),
lambda p: p == 1 or (p > 20 and p % 10 == 1)),
(lambda v: dh_centuryBC(v, '%dnd century BC'),
lambda p: p == 2 or (p > 20 and p % 10 == 2)),
(lambda v: dh_centuryBC(v, '%drd century BC'),
lambda p: p == 3 or (p > 20 and p % 10 == 3)),
(lambda v: dh_centuryBC(v, '%dth century BC'), alwaysTrue)]),
'eo': lambda v: dh_centuryBC(v, '%d-a jarcento a.K.'),
'es': lambda v: dh_centuryBC(v, 'Siglo %R adC'),
'et': lambda v: dh_centuryBC(v, '%d. aastatuhat eKr'),
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Ensimmäinen vuosisata eaa.'),
lambda p: p == 1),
(lambda v: dh(v, '%d00-luku eaa.', lambda i: i - 1,
lambda ii: ii[0] + 1), alwaysTrue)]),
'fr': lambda m: multi(m, [
(lambda v: dh_centuryBC(v, '%Rer siècle av. J.-C.'),
lambda p: p == 1),
(lambda v: dh_centuryBC(v, '%Re siècle av. J.-C.'),
alwaysTrue)]),
'he': lambda v: dh_centuryBC(v, 'המאה ה־%d לפני הספירה'),
'hr': lambda v: dh_centuryBC(v, '%d. stoljeće p.n.e.'),
'id': lambda v: dh_centuryBC(v, 'Abad ke-%d SM'),
'io': lambda v: dh_centuryBC(v, '%dma yar-cento aK'),
'it': lambda v: dh_centuryBC(v, '%R secolo AC'),
'ja': lambda v: dh_centuryBC(v, '紀元前%d世紀'),
'ka': lambda v: dh_centuryBC(v, 'ძვ. წ. %R საუკუნე'),
'ko': lambda v: dh_centuryBC(v, '기원전 %d세기'),
'ksh': lambda v: dh_centuryBC(v, '%d. Joohunndot füür Kreůßtůß'),
# uncertain if ksh is right. might go to redirect.
'la': lambda v: dh_centuryBC(v, 'Saeculum %d a.C.n.'),
'lb': lambda v: dh_centuryBC(v, '%d. Joerhonnert v. Chr.'),
'nl': lambda v: dh_centuryBC(v, '%de eeuw v.Chr.'),
'nn': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, '1. århundret fvt.'),
lambda p: p == 1),
(lambda v: dh(v, '%d00-talet fvt.', lambda i: i - 1,
lambda ii: ii[0] + 1), alwaysTrue)]),
'nb': lambda v: dh_centuryBC(v, '%d. århundre f.Kr.'),
'pl': lambda v: dh_centuryBC(v, '%R wiek p.n.e.'),
'pt': lambda v: dh_centuryBC(v, 'Século %R a.C.'),
'ro': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Secolul I î.Hr.'), lambda p: p == 1),
(lambda v: dh_centuryBC(v, 'Secolul al %R-lea î.Hr.'),
alwaysTrue)]),
'ru': lambda v: dh_centuryBC(v, '%R век до н. э.'),
'scn': lambda v: dh_centuryBC(v, 'Sèculu %R a.C.'),
'sk': lambda v: dh_centuryBC(v, '%d. storočie pred Kr.'),
'sl': lambda v: dh_centuryBC(v, '%d. stoletje pr. n. št.'),
'sq': lambda v: dh_centuryBC(v, 'Shekulli %R p.e.s.'),
'sr': lambda v: dh_centuryBC(v, '%d. век пне.'),
'sv': lambda v: dh(v, '%d00-talet f.Kr.',
lambda i: i - 1, lambda ii: ii[0] + 1),
'tr': lambda v: dh_centuryBC(v, 'MÖ %d. yüzyıl'),
'tt': lambda v: dh_centuryBC(v, 'MA %d. yöz'),
'uk': lambda v: dh_centuryBC(v, '%d століття до Р.Х.'),
'zh': lambda m: multi(m, [
(lambda v: dh_centuryBC(v, '前%d世纪'), lambda p: p < 4),
(lambda v: dh_centuryBC(v, '前%d世紀'), alwaysTrue)]),
},
'CenturyAD_Cat': {
'cs': lambda v: dh_centuryAD(v, '%d. století'),
'da': lambda v: dh_centuryAD(v, '%d. århundrede'),
'nb': lambda v: dh(v, '%d-tallet',
lambda i: (i - 1) * 100,
lambda ii: ii[0] // 100 + 1),
},
'CenturyBC_Cat': {
'cs': lambda v: dh_centuryBC(v, '%d. století př. n. l.'),
'de': lambda v: dh_centuryBC(v, 'Jahr (%d. Jh. v. Chr.)'),
'nb': lambda v: dh(v, '%d-tallet f.Kr.',
lambda i: (i - 1) * 100,
lambda ii: ii[0] // 100 + 1),
},
'MillenniumAD': {
'bg': lambda v: dh_millenniumAD(v, '%d хилядолетие'),
'ca': lambda v: dh_millenniumAD(v, 'Mil·lenni %R'),
'cs': lambda v: dh_millenniumAD(v, '%d. tisíciletí'),
'de': lambda v: dh_millenniumAD(v, '%d. Jahrtausend'),
'el': lambda v: dh_millenniumAD(v, '%dη χιλιετία'),
'en': lambda m: multi(m, [
(lambda v: dh_millenniumAD(v, '%dst millennium'),
lambda p: p == 1 or (p > 20 and p % 10 == 1)),
(lambda v: dh_millenniumAD(v, '%dnd millennium'),
lambda p: p == 2 or (p > 20 and p % 10 == 2)),
(lambda v: dh_millenniumAD(v, '%drd millennium'),
lambda p: p == 3 or (p > 20 and p % 10 == 3)),
(lambda v: dh_millenniumAD(v, '%dth millennium'),
alwaysTrue)]),
'es': lambda v: dh_millenniumAD(v, '%R milenio'),
'fa': lambda v: dh_millenniumAD(v, 'هزاره %R (میلادی)'),
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Ensimmäinen vuosituhat'),
lambda p: p == 1),
(lambda v: dh_constVal(v, 2, 'Toinen vuosituhat'),
lambda p: p == 2),
(lambda v: dh_constVal(v, 3, 'Kolmas vuosituhat'),
lambda p: p == 3),
(lambda v: dh_constVal(v, 4, 'Neljäs vuosituhat'),
lambda p: p == 4),
(lambda v: dh_constVal(v, 5, 'Viides vuosituhat'),
lambda p: p == 5),
(lambda v: dh(v, '%d000-vuosituhat',
lambda i: i - 1,
lambda ii: ii[0] + 1),
alwaysTrue)]),
'fr': lambda m: multi(m, [
(lambda v: dh_millenniumAD(v, '%Rer millénaire'),
lambda p: p == 1),
(lambda v: dh_millenniumAD(v, '%Re millénaire'), alwaysTrue)]),
'he': lambda m: multi(m, [
(lambda v: dh_millenniumAD(v, 'האלף הראשון %d'), lambda p: p == 1),
(lambda v: dh_millenniumAD(v, 'האלף השני %d'), lambda p: p == 2),
(lambda v: dh_millenniumAD(v, 'האלף השלישי %d'), lambda p: p == 3),
(lambda v: dh_millenniumAD(v, 'האלף הרביעי %d'), lambda p: p == 4),
(lambda v: dh_millenniumAD(v, 'האלף החמישי %d '),
lambda p: p == 5),
(lambda v: dh_millenniumAD(v, 'האלף השישי %d'), lambda p: p == 6),
(lambda v: dh_millenniumAD(v, 'האלף השביעי %d'), lambda p: p == 7),
(lambda v: dh_millenniumAD(v, 'האלף השמיני %d'), lambda p: p == 8),
(lambda v: dh_millenniumAD(v, 'האלף התשיעי %d'), lambda p: p == 9),
(lambda v: dh_millenniumAD(v, 'האלף העשירי %d'),
lambda p: p == 10),
(lambda v: dh_millenniumAD(v, 'האלף ה־%d'), alwaysTrue)]),
'hu': lambda v: dh_millenniumAD(v, '%d. évezred'),
'it': lambda v: dh_millenniumAD(v, '%R millennio'),
'ja': lambda v: dh_millenniumAD(v, '%d千年紀'),
'ka': lambda v: dh_millenniumAD(v, '%R ათასწლეული'),
'ksh': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Eetße Johdousend'),
lambda p: p == 1),
(lambda v: dh_constVal(v, 2, 'Zweijte Johdousend'),
lambda p: p == 2),
(lambda v: dh_constVal(v, 3, 'Drette Johdousend'),
lambda p: p == 3),
(lambda v: dh_constVal(v, 4, 'Veete Johdousend'),
lambda p: p == 4),
(lambda v: dh_constVal(v, 5, 'Föfte Johdousend'),
lambda p: p == 5),
(lambda v: dh_millenniumAD(v, '%d. Johdousend'), alwaysTrue)]),
'lb': lambda v: dh_millenniumAD(v, '%d. Joerdausend'),
'mhr': lambda v: dh_millenniumAD(v, '%R. курым — '),
'lt': lambda v: dh_millenniumAD(v, '%d tūkstantmetis'),
'pt': lambda v: slh(v, [
'Primeiro milénio d.C.', 'Segundo milénio d.C.',
'Terceiro milénio d.C.', 'Quarto milénio d.C.']),
'ro': lambda v: slh(v, ['Mileniul I', 'Mileniul al II-lea',
'Mileniul III']),
'ru': lambda v: dh_millenniumAD(v, '%d тысячелетие'),
'sk': lambda v: dh_millenniumAD(v, '%d. tisícročie'),
'sl': lambda v: dh_millenniumAD(v, '%d. tisočletje'),
'sv': lambda v: dh(v, '%d000-talet (millennium)',
lambda i: i - 1, lambda ii: ii[0] + 1),
'tt': lambda v: dh_millenniumAD(v, '%d. meñyıllıq'),
'ur': lambda m: multi(m, [
(lambda v: dh_constVal(v, 0, '0000مبم'), lambda p: p == 0),
(lambda v: dh_millenniumAD(v, '%d000مبم'), alwaysTrue)]),
},
'MillenniumBC': {
'bg': lambda v: dh_millenniumBC(v, '%d хилядолетие пр.н.е.'),
'ca': lambda v: dh_millenniumBC(v, 'Mil·lenni %R aC'),
'cs': lambda v: dh_millenniumBC(v, '%d. tisíciletí př. n. l.'),
'da': lambda v: dh_millenniumBC(v, '%d. årtusinde f.Kr.'),
'de': lambda v: dh_millenniumBC(v, '%d. Jahrtausend v. Chr.'),
'el': lambda v: dh_millenniumBC(v, '%dη χιλιετία π.Χ.'),
'en': lambda v: dh_millenniumBC(v, '%dst millennium BC'),
'es': lambda v: dh_millenniumBC(v, '%R milenio adC'),
'fi': lambda m: multi(m, [
(lambda v: dh_constVal(v, 1, 'Ensimmäinen vuosituhat eaa.'),
lambda p: p == 1),
(lambda v: dh(v, '%d000-vuosituhat eaa.', lambda i: i - 1,
lambda ii: ii[0] + 1), alwaysTrue)]),
'fr': lambda v: dh_millenniumBC(v, '%Rer millénaire av. J.-C.'),
'he': lambda m: multi(m, [
(lambda v: dh_millenniumAD(v, 'האלף הראשון %d לפני הספירה'),
lambda p: p == 1),
(lambda v: dh_millenniumAD(v, 'האלף השני %d לפני הספירה'),
lambda p: p == 2),
(lambda v: dh_millenniumAD(v, 'האלף השלישי %d לפני הספירה'),
lambda p: p == 3),
(lambda v: dh_millenniumAD(v, 'האלף הרביעי %d לפני הספירה'),
lambda p: p == 4),
(lambda v: dh_millenniumAD(v, 'האלף החמישי %d לפני הספירה'),
lambda p: p == 5),
(lambda v: dh_millenniumAD(v, 'האלף השישי %d לפני הספירה'),
lambda p: p == 6),
(lambda v: dh_millenniumAD(v, 'האלף השביעי %d לפני הספירה'),
lambda p: p == 7),
(lambda v: dh_millenniumAD(v, 'האלף השמיני %d לפני הספירה'),
lambda p: p == 8),
(lambda v: dh_millenniumAD(v, 'האלף התשיעי %d לפני הספירה'),
lambda p: p == 9),
(lambda v: dh_millenniumAD(v, 'האלף העשירי %d לפני הספירה'),
lambda p: p == 10),
(lambda v: dh_millenniumAD(v, 'האלף ה־%d לפני הספירה'),
alwaysTrue)]),
'hu': lambda v: dh_millenniumBC(v, 'I. e. %d. évezred'),
'it': lambda v: dh_millenniumBC(v, '%R millennio AC'),
'ja': lambda v: dh_millenniumBC(v, '紀元前%d千年紀'),
'ka': lambda v: dh_millenniumBC(v, 'ძვ. წ. %R ათასწლეული'),
'lb': lambda v: dh_millenniumBC(v, '%d. Joerdausend v. Chr.'),
'nl': lambda v: dh_millenniumBC(v, '%de millennium v.Chr.'),
'pt': lambda v: slh(v, ['Primeiro milénio a.C.',
'Segundo milénio a.C.',
'Terceiro milénio a.C.',
'Quarto milénio a.C.']),
'ro': lambda v: dh_millenniumBC(v, 'Mileniul %R î.Hr.'),
'ru': lambda v: dh_millenniumBC(v, '%d тысячелетие до н. э.'),
'sv': lambda v: dh(v, '%d000-talet f.Kr. (millennium)',
lambda i: i - 1, lambda ii: ii[0] + 1),
'tt': lambda v: dh_millenniumBC(v, 'MA %d. meñyıllıq'),
'zh': lambda v: dh_millenniumBC(v, '前%d千年'),
},
'Cat_Year_MusicAlbums': {
'cs': lambda v: dh_yearAD(v, 'Alba roku %d'),
'en': lambda v: dh_yearAD(v, '%d albums'),
'fa': lambda v: dh_yearAD(v, 'آلبومهای %d (میلادی)'),
'fi': lambda v: dh_yearAD(v, 'Vuoden %d albumit'),
'fr': lambda v: dh_yearAD(v, 'Album musical sorti en %d'),
'he': lambda v: dh_yearAD(v, 'אלבומי %d'),
'nb': lambda v: dh_yearAD(v, 'Musikkalbum fra %d'),
'pl': lambda v: dh_yearAD(v, 'Albumy muzyczne wydane w roku %d'),
'sl': lambda v: dh_yearAD(v, 'Albumi iz %d'),
'sv': lambda v: dh_yearAD(v, '%d års musikalbum'),
},
'Cat_BirthsAD': {
'an': lambda v: dh_yearAD(v, '%d (naixencias)'),
'ar': lambda v: dh_yearAD(v, 'مواليد %d'),
'arz': lambda v: dh_yearAD(v, 'مواليد %d'),
'bar': lambda v: dh_yearAD(v, 'Geboren %d'),
'be': lambda v: dh_yearAD(v, 'Нарадзіліся ў %d годзе'),
'be-tarask': lambda v: dh_yearAD(v, 'Нарадзіліся ў %d годзе'),
'bg': lambda v: dh_yearAD(v, 'Родени през %d година'),
'bjn': lambda v: dh_yearAD(v, 'Kalahiran %d'),
'bn': lambda v: dh_yearAD(v, '%B-এ জন্ম'),
'bpy': lambda v: dh_yearAD(v, 'মারি %B-এ উজ্জিসিতা'),
'br': lambda v: dh_yearAD(v, 'Ganedigezhioù %d'),
'bs': lambda v: dh_yearAD(v, '%d rođenja'),
'cbk-zam': lambda v: dh_yearAD(v, 'Nacidos en %d'),
'crh': lambda v: dh_yearAD(v, '%d senesinde doğğanlar'),
'cs': lambda v: dh_yearAD(v, 'Narození %d'),
'cy': lambda v: dh_yearAD(v, 'Genedigaethau %d'),
'da': lambda v: dh_yearAD(v, 'Født i %d'),
'de': lambda v: dh_yearAD(v, 'Geboren %d'),
'dsb': lambda v: dh_yearAD(v, 'Roź. %d'),
'el': lambda v: dh_yearAD(v, 'Γεννήσεις το %d'),
'en': lambda v: dh_yearAD(v, '%d births'),
'eo': lambda v: dh_yearAD(v, 'Naskiĝintoj en %d'),
'es': lambda v: dh_yearAD(v, 'Nacidos en %d'),
'et': lambda v: dh_yearAD(v, 'Sündinud %d'),
'eu': lambda v: dh_yearAD(v, '%dko jaiotzak'),
'fi': lambda v: dh_yearAD(v, 'Vuonna %d syntyneet'),
'fa': lambda v: dh_yearAD(v, 'زادگان %F (میلادی)'),
'fr': lambda v: dh_yearAD(v, 'Naissance en %d'),
'ga': lambda v: dh_yearAD(v, 'Daoine a rugadh i %d'),
'gan': lambda v: dh_yearAD(v, '%d年出世'),
'gv': lambda v: dh_yearAD(v, "Ruggyryn 'sy vlein %d"),
'hsb': lambda v: dh_yearAD(v, 'Rodź. %d'),
'hy': lambda v: dh_yearAD(v, '%d ծնունդներ'),
'id': lambda v: dh_yearAD(v, 'Kelahiran %d'),
'is': lambda v: dh_yearAD(v, 'Fólk fætt árið %d'),
'it': lambda v: dh_yearAD(v, 'Nati nel %d'),
'ja': lambda v: dh_yearAD(v, '%d年生'),
'jv': lambda v: dh_yearAD(v, 'Lair %d'),
'ka': lambda v: dh_yearAD(v, 'დაბადებული %d'),
'kk': lambda v: dh_yearAD(v, '%d жылы туғандар'),
'ko': lambda v: dh_yearAD(v, '%d년 태어남'),
'la': lambda v: dh_yearAD(v, 'Nati %d'),
'lb': lambda v: dh_yearAD(v, 'Gebuer %d'),
'lv': lambda v: dh_yearAD(v, '%d. gadā dzimušiel'),
'mk': lambda v: dh_yearAD(v, 'Родени во %d година'),
'ml': lambda v: dh_yearAD(v, '%d-ൽ ജനിച്ചവർ'),
'mn': lambda v: dh_yearAD(v, '%d онд төрөгсөд'),
'mr': lambda v: dh_yearAD(v, 'इ.स. %H मधील जन्म'),
'ms': lambda v: dh_yearAD(v, 'Kelahiran %d'),
'mt': lambda v: dh_yearAD(v, 'Twieldu fl-%d'),
'nah': lambda v: dh_yearAD(v, 'Ōtlācatqueh xiuhpan %d'),
'new': lambda v: dh_yearAD(v, '%Hय् बुगु'),
'nn': lambda v: dh_yearAD(v, 'Fødde i %d'),
'nb': lambda v: dh_yearAD(v, 'Fødsler i %d'),
'oc': lambda v: dh_yearAD(v, 'Naissença en %d'),
'pdc': lambda v: dh_yearAD(v, 'Gebore %d'),
'pl': lambda v: dh_yearAD(v, 'Urodzeni w %d'),
'qu': lambda v: dh_yearAD(v, 'Paqarisqa %d'),
'ro': lambda v: dh_yearAD(v, 'Nașteri în %d'),
'ru': lambda v: dh_yearAD(v, 'Родившиеся в %d году'),
'sah': lambda v: dh_yearAD(v, '%d сыллаахха төрөөбүттэр'),
'se': lambda v: dh_yearAD(v, 'Riegádeamit %d'),
'sh': lambda v: dh_yearAD(v, 'Rođeni %d.'),
'sk': lambda v: dh_yearAD(v, 'Narodenia v %d'),
'sl': lambda v: dh_yearAD(v, 'Rojeni leta %d'),
'sq': lambda v: dh_yearAD(v, 'Lindje %d'),
'sr': lambda v: dh_yearAD(v, 'Рођени %d.'),
'sv': lambda v: dh_yearAD(v, 'Födda %d'),
'sw': lambda v: dh_yearAD(v, 'Waliozaliwa %d'),
'szl': lambda v: dh_yearAD(v, 'Rodzyńi we %d'),
'ta': lambda v: dh_yearAD(v, '%d பிறப்புகள்'),
'te': lambda v: dh_yearAD(v, '%d జననాలు'),
'th': lambda v: dh_yearAD(v, 'บุคคลที่เกิดในปี พ.ศ. %T'),
'tl': lambda v: dh_yearAD(v, 'Ipinanganak noong %d'),
'tr': lambda v: dh_yearAD(v, '%d doğumlular'),
'tt': lambda v: dh_yearAD(v, '%d елда туганнар'),
'uk': lambda v: dh_yearAD(v, 'Народились %d'),
'ur': lambda v: dh_yearAD(v, '%dء کی پیدائشیں'),
'vi': lambda v: dh_yearAD(v, 'Sinh %d'),
'war': lambda v: dh_yearAD(v, 'Mga natawo han %d'),
'yo': lambda v: dh_yearAD(v, 'Àwọn ọjọ́ìbí ní %d'),
'zh': lambda v: dh_yearAD(v, '%d年出生'),
'yue': lambda v: dh_yearAD(v, '%d年出世'),
},
'Cat_DeathsAD': {
'an': lambda v: dh_yearAD(v, '%d (muertes)'),
'ay': lambda v: dh_yearAD(v, 'Jiwäwi %d'),
'ar': lambda v: dh_yearAD(v, 'وفيات %d'),
'ba': lambda v: dh_yearAD(v, '%d йылда үлгәндәр'),
'bar': lambda v: dh_yearAD(v, 'Gestorben %d'),
'be': lambda v: dh_yearAD(v, 'Памерлі ў %d годзе'),
'be-tarask': lambda v: dh_yearAD(v, 'Памерлі ў %d годзе'),
'bg': lambda v: dh_yearAD(v, 'Починали през %d година'),
'bn': lambda v: dh_yearAD(v, '%B-এ মৃত্যু'),
'br': lambda v: dh_yearAD(v, 'Marvioù %d'),
'bs': lambda v: dh_yearAD(v, '%d smrti'),
'crh': lambda v: dh_yearAD(v, '%d senesinde ölgenler'),
'cs': lambda v: dh_yearAD(v, 'Úmrtí %d'),
'cy': lambda v: dh_yearAD(v, 'Marwolaethau %d'),
'da': lambda v: dh_yearAD(v, 'Døde i %d'),
'de': lambda v: dh_yearAD(v, 'Gestorben %d'),
'dsb': lambda v: dh_yearAD(v, 'Wum. %d'),
'el': lambda v: dh_yearAD(v, 'Θάνατοι το %d'),
'en': lambda v: dh_yearAD(v, '%d deaths'),
'eo': lambda v: dh_yearAD(v, 'Mortintoj en %d'),
'es': lambda v: dh_yearAD(v, 'Fallecidos en %d'),
'et': lambda v: dh_yearAD(v, 'Surnud %d'),
'eu': lambda v: dh_yearAD(v, '%deko heriotzak'),
'fa': lambda v: dh_yearAD(v, 'درگذشتگان %F (میلادی)'),
'fi': lambda v: dh_yearAD(v, 'Vuonna %d kuolleet'),
'fr': lambda v: dh_yearAD(v, 'Décès en %d'),
'ga': lambda v: dh_yearAD(v, 'Básanna i %d'),
'gan': lambda v: dh_yearAD(v, '%d年過世'),
'gv': lambda v: dh_yearAD(v, "Baaseyn 'sy vlein %d"),
'hif': lambda v: dh_yearAD(v, '%d maut'),
'hsb': lambda v: dh_yearAD(v, 'Zemr. %d'),
'hy': lambda v: dh_yearAD(v, '%d մահեր'),
'id': lambda v: dh_yearAD(v, 'Kematian %d'),
'is': lambda v: dh_yearAD(v, 'Fólk dáið árið %d'),
'it': lambda v: dh_yearAD(v, 'Morti nel %d'),
'ja': lambda v: dh_yearAD(v, '%d年没'),
'jv': lambda v: dh_yearAD(v, 'Pati %d'),
'ka': lambda v: dh_yearAD(v, 'გარდაცვლილი %d'),
'kk': lambda v: dh_yearAD(v, '%d жылы қайтыс болғандар'),
'ko': lambda v: dh_yearAD(v, '%d년 죽음'),
'krc': lambda v: dh_yearAD(v, '%d джылда ёлгенле'),
'ky': lambda v: dh_yearAD(v, '%d жылы кайтыш болгандар'),
'la': lambda v: dh_yearAD(v, 'Mortui %d'),
'lb': lambda v: dh_yearAD(v, 'Gestuerwen %d'),
'lv': lambda v: dh_yearAD(v, '%d. gadā mirušie'),
'mk': lambda v: dh_yearAD(v, 'Починати во %d година'),
'ml': lambda v: dh_yearAD(v, '%d-ൽ മരിച്ചവർ'),
'mn': lambda v: dh_yearAD(v, '%d онд нас барагсад'),
'ms': lambda v: dh_yearAD(v, 'Kematian %d'),
'mt': lambda v: dh_yearAD(v, 'Mietu fl-%d'),
'nah': lambda v: dh_yearAD(v, '%d miquiztli'),
'nn': lambda v: dh_yearAD(v, 'Døde i %d'),
'nb': lambda v: dh_yearAD(v, 'Dødsfall i %d'),
'oc': lambda v: dh_yearAD(v, 'Decès en %d'),
'pdc': lambda v: dh_yearAD(v, 'Gschtaerewe %d'),
'pl': lambda v: dh_yearAD(v, 'Zmarli w %d'),
'pt': lambda v: dh_yearAD(v, 'Mortos em %d'),
'qu': lambda v: dh_yearAD(v, 'Wañusqa %d'),
'ro': lambda v: dh_yearAD(v, 'Decese în %d'),
'ru': lambda v: dh_yearAD(v, 'Умершие в %d году'),
'sah': lambda v: dh_yearAD(v, '%d сыллаахха өлбүттэр'),
'se': lambda v: dh_yearAD(v, 'Jápmimat %d'),
'sh': lambda v: dh_yearAD(v, 'Umrli %d.'),
'sk': lambda v: dh_yearAD(v, 'Úmrtia v %d'),
'sl': lambda v: dh_yearAD(v, 'Umrli leta %d'),
'sq': lambda v: dh_yearAD(v, 'Vdekje %d'),
'sr': lambda v: dh_yearAD(v, 'Умрли %d.'),
'sv': lambda v: dh_yearAD(v, 'Avlidna %d'),
'sw': lambda v: dh_yearAD(v, 'Waliofariki %d'),
'szl': lambda v: dh_yearAD(v, 'Umarći we %d'),
'ta': lambda v: dh_yearAD(v, '%d இறப்புகள்'),
'te': lambda v: dh_yearAD(v, '%d మరణాలు'),
'th': lambda v: dh_yearAD(v, 'บุคคลที่เสียชีวิตในปี พ.ศ. %T'),
'tl': lambda v: dh_yearAD(v, 'Namatay noong %d'),
'tr': lambda v: dh_yearAD(v, '%d yılında ölenler'),
'tt': lambda v: dh_yearAD(v, '%d елда вафатлар'),
'uk': lambda v: dh_yearAD(v, 'Померли %d'),
'ur': lambda v: dh_yearAD(v, '%dء کی وفیات'),
'vi': lambda v: dh_yearAD(v, 'Mất %d'),
'war': lambda v: dh_yearAD(v, 'Mga namatay han %d'),
'yo': lambda v: dh_yearAD(v, 'Àwọn ọjọ́aláìsí ní %d'),
'zh': lambda v: dh_yearAD(v, '%d年逝世'),
'yue': lambda v: dh_yearAD(v, '%d年死'),
},
'Cat_BirthsBC': {
'en': lambda v: dh_yearBC(v, '%d BC births'),
'nb': lambda v: dh_yearBC(v, 'Fødsler i %d f.Kr.'),
},
'Cat_DeathsBC': {
'en': lambda v: dh_yearBC(v, '%d BC deaths'),
'fr': lambda v: dh_yearBC(v, 'Décès en -%d'),
'nb': lambda v: dh_yearBC(v, 'Dødsfall i %d f.Kr.'),
},
'CurrEvents': {
'an': lambda v: dh_singVal(v, 'Autualidá'),
'ang': lambda v: dh_singVal(v, 'Efenealde belimpas'),
'ar': lambda v: dh_singVal(v, 'الأحداث الجارية'),
'be': lambda v: dh_singVal(v, 'Бягучыя падзеі'),
'bg': lambda v: dh_singVal(v, 'Текущи събития'),
'ca': lambda v: dh_singVal(v, 'Viquipèdia:Actualitat'),
'cs': lambda v: dh_singVal(v, 'Portál:Aktuality'),
'da': lambda v: dh_singVal(v, 'Aktuelle begivenheder'),
'de': lambda v: dh_singVal(v, 'Aktuelle Ereignisse'),
'el': lambda v: dh_singVal(v, 'Τρέχοντα γεγονότα'),
'en': lambda v: dh_singVal(v, 'Current events'),
'eo': lambda v: dh_singVal(v, 'Aktualaĵoj'),
'es': lambda v: dh_singVal(v, 'Actualidad'),
'et': lambda v: dh_singVal(v, 'Current events'),
'fa': lambda v: dh_singVal(v, 'رویدادهای کنونی'),
'fi': lambda v: dh_singVal(v, 'Ajankohtaista'),
'fr': lambda v: dh_singVal(v, 'Actualités'),
'gl': lambda v: dh_singVal(v, 'Novas'),
'he': lambda v: dh_singVal(v, 'אקטואליה'),
'hu': lambda v: dh_singVal(v, 'Friss események'),
'id': lambda v: dh_singVal(v, 'Wikipedia:Peristiwa terkini'),
'io': lambda v: dh_singVal(v, 'Current events'),
'it': lambda v: dh_singVal(v, 'Attualità'),
'ja': lambda v: dh_singVal(v, '最近の出来事'),
'ka': lambda v: dh_singVal(v, 'ახალი ამბები'),
'ko': lambda v: dh_singVal(v, '요즘 화제'),
'ksh': lambda v: dh_singVal(v, 'Et Neuste'),
'ku': lambda v: dh_singVal(v, 'Bûyerên rojane'),
'la': lambda v: dh_singVal(v, 'Nuntii'),
'lb': lambda v: dh_singVal(v, 'Aktualitéit'),
'li': lambda v: dh_singVal(v, "In 't nuujs"),
'mn': lambda v: dh_singVal(v, 'Мэдээ'),
'nl': lambda v: dh_singVal(v, 'In het nieuws'),
'nb': lambda v: dh_singVal(v, 'Aktuelt'),
'os': lambda v: dh_singVal(v, 'Xabar'),
'pl': lambda v: dh_singVal(v, 'Bieżące wydarzenia'),
'pt': lambda v: dh_singVal(v, 'Eventos atuais'),
'ro': lambda v: dh_singVal(v, 'Actualităţi'),
'ru': lambda v: dh_singVal(v, 'Текущие события'),
'scn': lambda v: dh_singVal(v, 'Nutizzî'),
'sk': lambda v: dh_singVal(v, 'Aktuality'),
'sl': lambda v: dh_singVal(v, 'Trenutni dogodki'),
'sr': lambda v: dh_singVal(v, 'Википедија:Актуелности'),
'sv': lambda v: dh_singVal(v, 'Aktuella händelser'),
'su': lambda v: dh_singVal(v, 'Keur lumangsung'),
'ta': lambda v: dh_singVal(v, 'நடப்பு நிகழ்வுகள்'),
'th': lambda v: dh_singVal(v, 'เหตุการณ์ปัจจุบัน'),
'tl': lambda v: dh_singVal(v, 'Kasalukuyang pangyayari'),
'tr': lambda v: dh_singVal(v, 'Güncel olaylar'),
'uk': lambda v: dh_singVal(v, 'Поточні події'),
'ur': lambda v: dh_singVal(v, 'حالیہ واقعات'),
'vi': lambda v: dh_singVal(v, 'Thời sự'),
'wa': lambda v: dh_singVal(v, 'Wikinoveles'),
'yo': lambda v: dh_singVal(v, 'Current events'),
'zh': lambda v: dh_singVal(v, '新闻动态'),
'nan': lambda v: dh_singVal(v, 'Sin-bûn sū-kiāⁿ'),
},
}
#
# Add auto-generated empty dictionaries for DayOfMonth and MonthOfYear articles
#
for dayOfMonth in dayMnthFmts:
formats[dayOfMonth] = {}
for monthOfYear in yrMnthFmts:
formats[monthOfYear] = {}
def addFmt1(lang, isMnthOfYear, patterns):
"""Add 12 month formats for a specific type ('January', 'Feb.').
    Each generated format function accepts one parameter for the ->int or
    ->string conversions, just like everywhere else in the formats map.
The patterns parameter is a list of 12 elements to be used for each month.
@param lang: language code
@type lang: str
"""
assert len(patterns) == 12, 'pattern %s does not have 12 elements' % lang
for i in range(12):
if patterns[i] is not None:
if isMnthOfYear:
formats[yrMnthFmts[i]][lang] = eval(
'lambda v: dh_mnthOfYear(v, "{}")'.format(patterns[i]))
else:
formats[dayMnthFmts[i]][lang] = eval(
'lambda v: dh_dayOfMnth(v, "{}")'.format(patterns[i]))
def addFmt2(lang, isMnthOfYear, pattern, makeUpperCase=None):
"""Update yrMnthFmts and dayMnthFmts using addFmt1."""
addFmt1(lang, isMnthOfYear,
makeMonthNamedList(lang, pattern, makeUpperCase))
def makeMonthList(pattern):
"""Return a list of 12 elements based on the number of the month."""
return [pattern % m for m in range(1, 13)]
def makeMonthNamedList(lang, pattern, makeUpperCase=None):
"""Create a list of 12 elements based on the name of the month.
The language-dependent month name is used as a formatting argument to the
    pattern. The pattern must have one %s that will be replaced by the
localized month name.
Use %%d for any other parameters that should be preserved.
"""
if makeUpperCase is None:
return [pattern % monthName(lang, m) for m in range(1, 13)]
elif makeUpperCase:
f = first_upper
else:
f = first_lower
return [pattern % f(monthName(lang, m)) for m in range(1, 13)]
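# Illustrative sketch (not part of the original helpers): addFmt2() glues the
# two pieces above together. Assuming monthName('de', 1) == 'Januar', a call
# such as
#     addFmt2('de', False, '%%d. %s', True)
# first expands the pattern via makeMonthNamedList('de', '%%d. %s', True) into
# ['%d. Januar', '%d. Februar', ...] and then addFmt1() registers one
# dh_dayOfMnth lambda per month in the corresponding dayMnthFmts entry of the
# formats table.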
# Add day of the month formats to the formatting table: "en:May 15"
addFmt2('af', False, '%%d %s', True)
addFmt2('gsw', False, '%%d. %s', True)
addFmt1('an', False, ['%d de chinero', '%d de frebero', '%d de marzo',
"%d d'abril", '%d de mayo', '%d de chunio',
'%d de chulio', "%d d'agosto", '%d de setiembre',
"%d d'otubre", '%d de nobiembre', "%d d'abiento"])
addFmt2('ang', False, '%%d %s', True)
addFmt1('ar', False, ['%d يناير', '%d فبراير', '%d مارس', '%d أبريل',
'%d مايو', '%d يونيو', '%d يوليو', '%d أغسطس',
'%d سبتمبر', '%d أكتوبر', '%d نوفمبر', '%d ديسمبر'])
addFmt1('ast', False, ['%d de xineru', '%d de febreru', '%d de marzu',
"%d d'abril", '%d de mayu', '%d de xunu',
'%d de xunetu', "%d d'agost", '%d de setiembre',
"%d d'ochobre", '%d de payares', "%d d'avientu"])
addFmt1('be', False, ['%d студзеня', '%d лютага', '%d сакавіка',
'%d красавіка', '%d траўня', '%d чэрвеня',
'%d ліпеня', '%d жніўня', '%d верасьня',
'%d кастрычніка', '%d лістапада', '%d сьнежня'])
addFmt2('bg', False, '%%d %s', False)
addFmt2('bn', False, '%s %%B')
addFmt2('bs', False, '%%d. %s', False)
addFmt1('ca', False, ['%d de gener', '%d de febrer', '%d de març',
"%d d'abril", '%d de maig', '%d de juny',
'%d de juliol', "%d d'agost", '%d de setembre',
"%d d'octubre", '%d de novembre', '%d de desembre'])
addFmt2('ceb', False, '%s %%d', True)
addFmt1('co', False, ['%d di ghjennaghju', '%d di frivaghju', '%d di marzu',
"%d d'aprile", '%d di maghju', '%d di ghjugnu',
'%d di lugliu', "%d d'aost", '%d di settembre',
"%d d'uttrovi", '%d di nuvembri', '%d di decembre'])
addFmt2('cs', False, '%%d. %s', False)
addFmt2('csb', False, '%%d %sa', False)
addFmt2('cv', False, '%s, %%d', True)
addFmt2('cy', False, '%%d %s', True)
addFmt2('da', False, '%%d. %s', False)
addFmt2('de', False, '%%d. %s', True)
addFmt1('el', False, ['%d Ιανουαρίου', '%d Φεβρουαρίου', '%d Μαρτίου',
'%d Απριλίου', '%d Μαΐου', '%d Ιουνίου', '%d Ιουλίου',
'%d Αυγούστου', '%d Σεπτεμβρίου', '%d Οκτωβρίου',
'%d Νοεμβρίου', '%d Δεκεμβρίου'])
addFmt2('en', False, '%s %%d', True)
addFmt2('eo', False, '%%d-a de %s', False)
addFmt2('es', False, '%%d de %s', False)
addFmt2('et', False, '%%d. %s', False)
addFmt2('eu', False, '%saren %%d', True)
addFmt1('fa', False, ['%d ژانویه', '%d فوریه', '%d مارس', '%d آوریل',
'%d مه', '%d ژوئن', '%d ژوئیه', '%d اوت',
'%d سپتامبر', '%d اکتبر', '%d نوامبر', '%d دسامبر'])
addFmt2('fi', False, '%%d. %sta', False)
addFmt2('fo', False, '%%d. %s', False)
addFmt1('fr', False, ['%d janvier', '%d février', '%d mars', '%d avril',
'%d mai', '%d juin', '%d juillet', '%d août',
'%d septembre', '%d octobre', '%d novembre',
'%d décembre'])
addFmt2('fur', False, '%%d di %s', True)
addFmt2('fy', False, '%%d %s', False)
addFmt1('ga', False, ['%d Eanáir', '%d Feabhra', '%d Márta', '%d Aibreán',
'%d Bealtaine', '%d Meitheamh', '%d Iúil', '%d Lúnasa',
'%d Meán Fómhair', '%d Deireadh Fómhair', '%d Samhain',
'%d Mí na Nollag'])
addFmt2('gl', False, '%%d de %s', False)
addFmt2('he', False, '%%d ב%s')
addFmt1('hr', False, ['%d. siječnja', '%d. veljače', '%d. ožujka',
'%d. travnja', '%d. svibnja', '%d. lipnja', '%d. srpnja',
'%d. kolovoza', '%d. rujna', '%d. listopada',
'%d. studenog', '%d. prosinca'])
addFmt2('hu', False, '%s %%d.', True)
addFmt2('ia', False, '%%d de %s', False)
addFmt2('id', False, '%%d %s', True)
addFmt2('ie', False, '%%d %s', False)
addFmt2('io', False, '%%d di %s', False)
addFmt1('is', False, ['%d. janúar', '%d. febrúar', '%d. mars', '%d. apríl',
'%d. maí', '%d. júní', '%d. júlí', '%d. ágúst',
'%d. september', '%d. október', '%d. nóvember',
'%d. desember'])
addFmt2('it', False, '%%d %s', False)
addFmt1('ja', False, makeMonthList('%d月%%d日'))
addFmt2('jv', False, '%%d %s', True)
addFmt2('ka', False, '%%d %s')
addFmt1('ko', False, makeMonthList('%d월 %%d일'))
addFmt1('ku', False, ["%d'ê rêbendanê", "%d'ê reşemiyê", "%d'ê adarê",
"%d'ê avrêlê", "%d'ê gulanê", "%d'ê pûşperê",
"%d'ê tîrmehê", "%d'ê gelawêjê", "%d'ê rezberê",
"%d'ê kewçêrê", "%d'ê sermawezê", "%d'ê berfanbarê"])
addFmt1('la', False, ['%d Ianuarii', '%d Februarii', '%d Martii', '%d Aprilis',
'%d Maii', '%d Iunii', '%d Iulii', '%d Augusti',
'%d Septembris', '%d Octobris', '%d Novembris',
'%d Decembris'])
addFmt2('lb', False, '%%d. %s', True)
addFmt1('li', False, ['%d januari', '%d februari', '%d miert', '%d april',
'%d mei', '%d juni', '%d juli', '%d augustus',
'%d september', '%d oktober', '%d november',
'%d december'])
addFmt1('lt', False, ['Sausio %d', 'Vasario %d', 'Kovo %d', 'Balandžio %d',
'Gegužės %d', 'Birželio %d', 'Liepos %d', 'Rugpjūčio %d',
'Rugsėjo %d', 'Spalio %d', 'Lapkričio %d',
'Gruodžio %d'])
addFmt2('lv', False, '%%d. %s', False)
addFmt2('mhr', False, '%%d %s', False)
addFmt1('mk', False, ['%d јануари', '%d февруари', '%d март', '%d април',
'%d мај', '%d јуни', '%d јули', '%d август',
'%d септември', '%d октомври', '%d ноември',
'%d декември'])
addFmt2('ml', False, '%s %%d')
addFmt2('ms', False, '%%d %s', True)
addFmt2('nap', False, "%%d 'e %s", False)
addFmt2('nds', False, '%%d. %s', True)
addFmt1('nl', False, ['%%d %s' % v
for v in ['januari', 'februari', 'maart', 'april', 'mei',
'juni', 'juli', 'augustus', 'september',
'oktober', 'november', 'december']])
addFmt1('nn', False, ['%%d. %s' % v
for v in ['januar', 'februar', 'mars', 'april',
'mai', 'juni', 'juli', 'august', 'september',
'oktober', 'november', 'desember']])
addFmt2('nb', False, '%%d. %s', False)
addFmt1('oc', False, ['%d de genièr', '%d de febrièr', '%d de març',
"%d d'abril", '%d de mai', '%d de junh', '%d de julhet',
"%d d'agost", '%d de setembre', "%d d'octobre",
'%d de novembre', '%d de decembre'])
addFmt1('os', False, ['%d январы', '%d февралы', '%d мартъийы', '%d апрелы',
'%d майы', None, '%d июлы', None, '%d сентябры', None,
'%d ноябры', '%d декабры'])
addFmt1('pl', False, ['%d stycznia', '%d lutego', '%d marca', '%d kwietnia',
'%d maja', '%d czerwca', '%d lipca', '%d sierpnia',
'%d września', '%d października', '%d listopada',
'%d grudnia'])
addFmt2('pt', False, '%%d de %s', True)
addFmt2('ro', False, '%%d %s', False)
addFmt1('ru', False, ['%d января', '%d февраля', '%d марта', '%d апреля',
'%d мая', '%d июня', '%d июля', '%d августа',
'%d сентября', '%d октября', '%d ноября', '%d декабря'])
addFmt2('sco', False, '%%d %s', True)
addFmt2('scn', False, '%%d di %s', False)
addFmt1('se', False, ['ođđajagimánu %d.', 'guovvamánu %d.', 'njukčamánu %d.',
'cuoŋománu %d.', 'miessemánu %d.', 'geassemánu %d.',
'suoidnemánu %d.', 'borgemánu %d.', 'čakčamánu %d.',
'golggotmánu %d.', 'skábmamánu %d.', 'juovlamánu %d.'])
addFmt1('sh', False, makeMonthList('%%d.%d.'))
addFmt2('sk', False, '%%d. %s', False)
addFmt2('sl', False, '%%d. %s', False)
addFmt1('sq', False, ['%d Janar', '%d Shkurt', '%d Mars', '%d Prill', '%d Maj',
'%d Qershor', '%d Korrik', '%d Gusht', '%d Shtator',
'%d Tetor', '%d Nëntor', '%d Dhjetor'])
addFmt2('sr', False, '%%d. %s', False)
addFmt2('su', False, '%%d %s', True)
addFmt2('sv', False, '%%d %s', False)
addFmt2('ta', False, '%s %%d')
addFmt2('te', False, '%s %%d')
addFmt2('th', False, '%%d %s') # %%T
addFmt2('tl', False, '%s %%d')
addFmt2('tr', False, '%%d %s', True)
addFmt2('tt', False, '%%d. %s', True)
addFmt1('uk', False, ['%d січня', '%d лютого', '%d березня', '%d квітня',
'%d травня', '%d червня', '%d липня', '%d серпня',
'%d вересня', '%d жовтня', '%d листопада', '%d грудня'])
addFmt1('ur', False, ['%d جنوری', '%d فروری', '%d مارچ',
'%d اپریل', '%d مئی', '%d جون', '%d جولائی',
'%d اگست', '%d ستمبر', '%d اکتوبر',
'%d نومبر', '%d دسمبر'])
addFmt2('vec', False, '%%d de %s', False)
addFmt1('vi', False, makeMonthList('%%d tháng %d'))
addFmt2('vo', False, '%s %%d', False)
addFmt1('zh', False, makeMonthList('%d月%%d日'))
# Walloon names depend on the day number, thus we must generate various
# different patterns
waMonthNames = ['djanvî', 'fevrî', 'måss', 'avri', 'may', 'djun', 'djulete',
'awousse', 'setimbe', 'octôbe', 'nôvimbe', 'decimbe']
# For month names beginning with a consonant...
for i in (0, 1, 2, 4, 5, 6, 8, 10, 11):
formats[dayMnthFmts[i]]['wa'] = eval(
'lambda m: multi(m, ['
'(lambda v: dh_dayOfMnth(v, "%%dî d\' %s"), lambda p: p == 1), '
'(lambda v: dh_dayOfMnth(v, "%%d d\' %s"), '
'lambda p: p in [2,3,20,22,23]), '
'(lambda v: dh_dayOfMnth(v, "%%d di %s"), alwaysTrue)])'
% (waMonthNames[i], waMonthNames[i], waMonthNames[i]))
# For month names beginning with a vowel...
for i in (3, 7, 9):
formats[dayMnthFmts[i]]['wa'] = eval(
'lambda m: multi(m, ['
'(lambda v: dh_dayOfMnth(v, "%%dî d\' %s"), lambda p: p == 1), '
'(lambda v: dh_dayOfMnth(v, "%%d d\' %s"), alwaysTrue)])'
% (waMonthNames[i], waMonthNames[i]))
# Breton ('br') uses '1añ' for the 1st of every month, and the bare number
# without a suffix for all other days
brMonthNames = makeMonthNamedList('br', '%s', True)
for i in range(0, 12):
formats[dayMnthFmts[i]]['br'] = eval(
'lambda m: multi(m, ['
'(lambda v: dh_dayOfMnth(v, "%%dañ %s"), lambda p: p == 1), '
'(lambda v: dh_dayOfMnth(v, "%%d %s"), alwaysTrue)])'
% (brMonthNames[i], brMonthNames[i]))
#
# Month of the Year: "en:May 1976"
#
addFmt2('af', True, '%s %%d', True)
addFmt2('ar', True, '%s %%d')
addFmt2('ang', True, '%s %%d', True)
addFmt2('cs', True, '%s %%d')
addFmt2('de', True, '%s %%d', True)
addFmt1('el', True, ['Ιανουάριος %d', 'Φεβρουάριος %d', 'Μάρτιος %d',
'Απρίλιος %d', 'Μάιος %d', 'Ιούνιος %d', 'Ιούλιος %d',
'Άυγουστος %d', 'Σεπτέμβριος %d', 'Οκτώβριος %d',
'Νοέμβριος %d', 'Δεκέμβριος %d'])
addFmt2('en', True, '%s %%d', True)
addFmt2('eo', True, '%s de %%d')
addFmt2('es', True, '%s de %%d', True)
addFmt2('et', True, '%s %%d', True)
addFmt2('fi', True, '%s %%d', True)
addFmt1('fr', True, ['Janvier %d', 'Février %d', 'Mars %d', 'Avril %d',
'Mai %d', 'Juin %d', 'Juillet %d', 'Août %d',
'Septembre %d', 'Octobre %d', 'Novembre %d',
'Décembre %d'])
addFmt2('he', True, '%s %%d', True)
addFmt2('it', True, 'Attualità/Anno %%d - %s', True)
addFmt1('ja', True, ['「最近の出来事」%%d年%d月' % mm for mm in range(1, 13)])
addFmt2('ka', True, '%s, %%d')
addFmt1('ko', True, ['%d년 1월', '%d년 2월', '%d년 3월', '%d년 4월', '%d년 5월',
'%d년 6월', '%d년 7월', '%d년 8월', '%d년 9월', '%d년 10월',
'%d년 11월', '%d년 12월'])
addFmt1('li', True, ['januari %d', 'februari %d', 'miert %d', 'april %d',
'mei %d', 'juni %d', 'juli %d', 'augustus %d',
'september %d', 'oktober %d', 'november %d',
'december %d'])
addFmt1('nl', True, ['Januari %d', 'Februari %d', 'Maart %d', 'April %d',
'Mei %d', 'Juni %d', 'Juli %d', 'Augustus %d',
'September %d', 'Oktober %d', 'November %d',
'December %d'])
addFmt2('pl', True, '%s %%d', True)
addFmt1('scn', True, [None, None, 'Marzu %d', None, None, None, None, None,
None, None, None, None])
addFmt2('sk', True, '%s %%d')
addFmt2('sv', True, '%s %%d', True)
addFmt2('th', True, '%s พ.ศ. %%T')
addFmt2('tl', True, '%s %%d')
addFmt2('tt', True, '%s, %%d', True)
addFmt2('uk', True, '%s %%d', True)
addFmt2('ur', True, '%s %%d', True)
addFmt1('vi', True, makeMonthList('Tháng %d năm %%d'))
addFmt1('zh', True, makeMonthList('%%d年%d月'))
addFmt1('nan', True, makeMonthList('%%d nî %d goe̍h'))
# This table defines the limits for each type of format data.
# Each item is a tuple with
# - a predicate function which returns True if the value falls
# within acceptable limits, False otherwise,
# - start value
# - end value
#
# TODO: Before compat 19d1cf9e (2006), there was a 'step' in the tuple,
# used exclusively by DecadeAD and DecadeBC to increment by 10 years.
# "and v%10==0" should be added to the limitation predicate for those two.
formatLimits = {
'MonthName': (lambda v: 1 <= v and v < 13, 1, 13),
'Number': (lambda v: 0 <= v and v < 1000000, 0, 1001),
'YearAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
# zh: has years as old as 前1700年
'YearBC': (lambda v: 0 <= v and v < 4001, 0, 501),
'DecadeAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
# zh: has decades as old as 前1700年代
'DecadeBC': (lambda v: 0 <= v and v < 4001, 0, 501),
# Some centuries use Roman numerals or a given list
# do not exceed them in testing
'CenturyAD': (lambda v: 1 <= v and v < 41, 1, 23),
'CenturyBC': (lambda v: 1 <= v and v < 91, 1, 23),
'CenturyAD_Cat': (lambda v: 1 <= v and v < 41, 1, 23),
'CenturyBC_Cat': (lambda v: 1 <= v and v < 41, 1, 23),
# For millenniums, only test first 3 AD Millenniums and 1 BC Millennium
'MillenniumAD': (lambda v: 1 <= v and v < 6, 1, 4),
'MillenniumBC': (lambda v: 1 <= v and v < 20, 1, 2),
'Cat_Year_MusicAlbums': (lambda v: 1950 <= v and v < 2021, 1950, 2021),
'Cat_BirthsAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'Cat_DeathsAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'Cat_BirthsBC': (lambda v: 0 <= v and v < 4001, 0, 501),
'Cat_DeathsBC': (lambda v: 0 <= v and v < 4001, 0, 501),
'CurrEvents': (lambda v: 0 <= v and v < 1, 0, 1),
}
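# Quick illustration of the table above (added for clarity, not in the
# original source): each entry packs (predicate, test_start, test_end), e.g.
#     pred, start, end = formatLimits['YearAD']
#     pred(1999)   # -> True   (0 <= 1999 < 2501)
#     pred(2501)   # -> False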
# All month of year articles are in the same format
_formatLimit_MonthOfYear = (lambda v: 1900 <= v and v < 2051, 1900, 2051)
for month in yrMnthFmts:
formatLimits[month] = _formatLimit_MonthOfYear
_formatLimit_DayOfMonth31 = (lambda v: 1 <= v and v < 32, 1, 32)
_formatLimit_DayOfMonth30 = (lambda v: 1 <= v and v < 31, 1, 31)
_formatLimit_DayOfMonth29 = (lambda v: 1 <= v and v < 30, 1, 30)
for monthId in range(12):
if (monthId + 1) in (1, 3, 5, 7, 8, 10, 12):
# 31 days a month
formatLimits[dayMnthFmts[monthId]] = _formatLimit_DayOfMonth31
elif (monthId + 1) == 2: # February
# 29 days a month
formatLimits[dayMnthFmts[monthId]] = _formatLimit_DayOfMonth29
else:
# 30 days a month
formatLimits[dayMnthFmts[monthId]] = _formatLimit_DayOfMonth30
@deprecated('calendar.monthrange', since='20150707')
def getNumberOfDaysInMonth(month):
"""
Return the maximum number of days in a given month, 1 being January, etc.
    For February, 29 is always returned, even if the year is not a leap year.
"""
# use year 2000 which is a leap year
return calendar.monthrange(2000, month)[1]
def getAutoFormat(lang, title, ignoreFirstLetterCase=True):
"""
Return first matching formatted date value.
@param lang: language code
@param title: value to format
@return: dictName ('YearBC', 'December', ...) and value (a year, date, ...)
@rtype: tuple
"""
for dictName, dict in formats.items():
try:
year = dict[lang](title)
return dictName, year
except Exception:
pass
    # Sometimes the title begins with an upper-case letter while it is listed
    # in lower case, or the other way around; change the case of the first
    # character to the opposite and try again.
if ignoreFirstLetterCase:
try:
if title[0].isupper():
title = first_lower(title)
else:
title = first_upper(title)
return getAutoFormat(lang, title, ignoreFirstLetterCase=False)
except Exception:
pass
return None, None
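# Usage sketch (illustrative; the exact dictionary name returned depends on
# which 'formats' entry matches first for the given language):
#     dict_name, value = getAutoFormat('en', '1st century')
#     # expected: dict_name == 'CenturyAD' and value == 1, while a title
#     # that matches nothing yields (None, None)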
class FormatDate(object):
"""Format a date."""
def __init__(self, site):
"""Initializer."""
self.site = site
def __call__(self, m, d):
"""Return a formatted month and day."""
return formats['Day_' + enMonthNames[m - 1]][self.site.lang](d)
def formatYear(lang, year):
"""Return year name in a language."""
if year < 0:
return formats['YearBC'][lang](-year)
else:
return formats['YearAD'][lang](year)
def apply_month_delta(date, month_delta=1, add_overlap=False):
"""
Add or subtract months from the date.
    By default, if the new month has fewer days than the day of the original
    date, the last day of the new month is chosen. For example, March 31st
    plus one month results in April 30th.
    When add_overlap is enabled and such an overlap occurs, the new date is
    pushed one month further and get_month_delta will report a number one
    higher.
    This only works on calendars with 12 months per year, where the months
    are numbered consecutively starting at 1.
@param date: The starting date
@type date: date
@param month_delta: The amount of months added or subtracted.
@type month_delta: int
@param add_overlap: Add any missing days to the date, increasing the month
once more.
@type add_overlap: bool
@return: The end date
@rtype: type of date
"""
if int(month_delta) != month_delta:
raise ValueError('Month delta must be an integer')
month = (date.month - 1) + month_delta
year = date.year + month // 12
month = month % 12 + 1
day = min(date.day, calendar.monthrange(year, month)[1])
new_date = date.replace(year, month, day)
if add_overlap and day != date.day:
assert date.day > day, 'Day must not be more than length of the month'
new_date += datetime.timedelta(days=date.day - day)
return new_date
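# Worked example of the overlap behaviour described above (datetime is
# already used elsewhere in this module):
#     apply_month_delta(datetime.date(2000, 3, 31), 1)
#     # -> datetime.date(2000, 4, 30)   (April has only 30 days)
#     apply_month_delta(datetime.date(2000, 3, 31), 1, add_overlap=True)
#     # -> datetime.date(2000, 5, 1)    (the missing day spills into May)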
def get_month_delta(date1, date2):
"""
    Return the difference between two dates in months.
    This only works on calendars with 12 months per year, where the months
    are consecutive and non-negative numbers.
"""
return date2.month - date1.month + (date2.year - date1.year) * 12
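# e.g. get_month_delta(datetime.date(2020, 1, 15), datetime.date(2021, 3, 1))
# evaluates to (3 - 1) + (2021 - 2020) * 12 == 14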
| 47.75755
| 79
| 0.500167
|
ccfa509ddd73e5549904dbac4ace3d72c36601bf
| 809
|
py
|
Python
|
manage.py
|
vique254/Review
|
958bb9d45c54565377fae7f8da7d2d253777a5a2
|
[
"MIT"
] | null | null | null |
manage.py
|
vique254/Review
|
958bb9d45c54565377fae7f8da7d2d253777a5a2
|
[
"MIT"
] | 4
|
2020-02-12T03:30:05.000Z
|
2021-09-08T01:34:56.000Z
|
manage.py
|
vique254/Review
|
958bb9d45c54565377fae7f8da7d2d253777a5a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awards.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 33.708333
| 77
| 0.637824
|
234c12827c5d9f658acdd33e61ac3618ca85c93f
| 1,197
|
py
|
Python
|
Examples/FFT_py/sine.py
|
madhavakrishna/libStubs
|
ea8ba8caf7ab2f592c1e1b38fae18eebb6c68e2f
|
[
"Unlicense"
] | null | null | null |
Examples/FFT_py/sine.py
|
madhavakrishna/libStubs
|
ea8ba8caf7ab2f592c1e1b38fae18eebb6c68e2f
|
[
"Unlicense"
] | null | null | null |
Examples/FFT_py/sine.py
|
madhavakrishna/libStubs
|
ea8ba8caf7ab2f592c1e1b38fae18eebb6c68e2f
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
import scipy.fftpack
import numpy as np
import matplotlib.pyplot as plt
# Number of sample points -- FFT size
N = 512
# sampling frequency : fs
# By the Nyquist theorem, the maximum frequency that can be measured is fs/2
fs= 1000 #Hz
# sampling time period = 1/fs
T = 1/fs
# length of the signal in time
L = N*T
# Vector with sampling time-stamps
t = np.linspace(0.0,L,N)
# input signal
# test signal -- replace this --
y = 0.5 * np.sin(150 * 2.0 * np.pi * t) + np.sin(400 * 2.0 * np.pi * t)
fig, (datain,dataout) = plt.subplots(nrows=1,ncols=2,sharex=False,sharey=False)
yf = scipy.fftpack.fft(y)
# yfabs[0] gives the DC component of the input signal
# If the FFT input is purely real (true for most real-world applications),
# the second half of the FFT output is the complex conjugate of the first
# half, so only the first N/2 bins are kept.
# Scaling by 2/N converts the raw FFT magnitudes into the amplitudes of the
# sinusoidal components (single-sided spectrum).
yfabs = np.abs(yf[0:int(N/2)]) * 2/N
fd = np.array(list(range(0,int(N/2)))).astype(float) * fs/N
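# Quick sanity check (added for illustration, not in the original script):
# the frequency resolution is fs/N = 1000/512 ~ 1.95 Hz, so the one-sided
# spectrum should peak near 150 Hz (amplitude ~0.5) and 400 Hz (amplitude ~1.0)
print("strongest bin at %.1f Hz (expected near 400 Hz)" % fd[np.argmax(yfabs)])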
print(y)
print(yfabs)
datain.plot(t,y)
dataout.plot(fd,yfabs)
#yinv = scipy.fftpack.ifft(yf)
#dataout.plot(t,yinv)
#xf = np.linspace(0.0, 1.0/(2.0*T), N/2)
#fig, ax = plt.subplots()
#ax.plot(xf[1:], 2.0/N * np.abs(yf[1:N/2]))
plt.show()
| 26.6
| 79
| 0.675856
|
a1eb5feb2f1f7efeabace340c2475fe5312d0339
| 1,318
|
py
|
Python
|
openstack_dashboard/dashboards/admin/dashboard.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/dashboard.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/dashboard.py
|
hashsos/hashcloudos-horizon
|
0cc080ca6777e4a1dac5cbcc6143202baddab176
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from openstack_auth import utils
import horizon
class Admin(horizon.Dashboard):
name = _("Admin")
slug = "admin"
if getattr(settings, 'POLICY_CHECK_FUNCTION', None):
policy_rules = (('identity', 'admin_required'),
('image', 'context_is_admin'),
('volume', 'context_is_admin'),
('compute', 'context_is_admin'),
('network', 'context_is_admin'),
('orchestration', 'context_is_admin'),)
else:
permissions = (tuple(utils.get_admin_permissions()),)
horizon.register(Admin)
| 35.621622
| 78
| 0.653263
|
853147a611b2f9082033a8e2598774b94704e575
| 568
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBluedreamsblogJimdofreeCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBluedreamsblogJimdofreeCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBluedreamsblogJimdofreeCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractBluedreamsblogJimdofreeCom(item):
'''
Parser for 'bluedreamsblog.jimdofree.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| 25.818182
| 104
| 0.644366
|
8a877fd49bb7044e8e1dca5cd4874b6a19ab7c24
| 2,148
|
py
|
Python
|
Supported Languages/Python/smash/models/two_factor_authentication_model_response.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
Supported Languages/Python/smash/models/two_factor_authentication_model_response.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
Supported Languages/Python/smash/models/two_factor_authentication_model_response.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
smash.models.two_factor_authentication_model_response
This file was automatically generated for SMASH by SMASH v2.0 ( https://smashlabs.io )
"""
class TwoFactorAuthenticationModelResponse(object):
"""Implementation of the 'Two Factor Authentication Model Response' model.
TODO: type model description here.
Attributes:
key (string): TODO: type description here.
uid (string): TODO: type description here.
to (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"key" : "key",
"uid" : "uid",
"to" : "to"
}
def __init__(self,
key=None,
uid=None,
to=None,
                 additional_properties=None):
        """Constructor for the TwoFactorAuthenticationModelResponse class"""
        # Initialize members of the class
        self.key = key
        self.uid = uid
        self.to = to
        # Add additional model properties to the instance (use a fresh dict
        # rather than a shared mutable default argument)
        if additional_properties is None:
            additional_properties = {}
        self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
key = dictionary.get("key")
uid = dictionary.get("uid")
to = dictionary.get("to")
# Clean out expected properties from dictionary
        for prop_name in cls._names.values():
            if prop_name in dictionary:
                del dictionary[prop_name]
# Return an object of this model
return cls(key,
uid,
to,
dictionary)
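# Usage sketch with made-up values (not part of the generated SDK model):
#     payload = {"key": "abc123", "uid": "user-1", "to": "+15551234567"}
#     resp = TwoFactorAuthenticationModelResponse.from_dictionary(payload)
#     # resp.key == "abc123", resp.uid == "user-1", resp.to == "+15551234567",
#     # and any unrecognised keys end up in resp.additional_properties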
| 26.85
| 90
| 0.582868
|
412e42d963695845c374fb1972e17e53c57cb880
| 342
|
py
|
Python
|
middleware/headers.py
|
enricobacis/flaskey
|
203038b438545f33e2570e60974aab0a7af7bd2a
|
[
"Apache-2.0"
] | null | null | null |
middleware/headers.py
|
enricobacis/flaskey
|
203038b438545f33e2570e60974aab0a7af7bd2a
|
[
"Apache-2.0"
] | null | null | null |
middleware/headers.py
|
enricobacis/flaskey
|
203038b438545f33e2570e60974aab0a7af7bd2a
|
[
"Apache-2.0"
] | null | null | null |
from flask.wrappers import Request
class HeaderGetter(object):
"""Header Getter middleware"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
req = Request(environ, shallow=True)
        print(req.headers.get('X-Auth-Token'))
return self.app(environ, start_response)
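# Minimal usage sketch (assumes a Flask application object named `app`; this
# wiring is an illustration, not part of the original module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.wsgi_app = HeaderGetter(app.wsgi_app)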
| 26.307692
| 48
| 0.669591
|
166d84789674e7392faac57081156d323b920666
| 82
|
py
|
Python
|
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | 3
|
2021-02-23T01:34:28.000Z
|
2021-07-19T08:07:10.000Z
|
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | null | null | null |
benchmarks/python/PythonApp1.py
|
satoshigeyuki/Centaurus
|
032ffec87fc8ddb129347974d3478fd1ee5f305a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import CoreLib
if __name__ == "__main__":
pass
| 11.714286
| 26
| 0.695122
|
51f574ee9cfdc982cc7a4179c2a530c3a62fbc2a
| 803
|
py
|
Python
|
utils/neuron/models/trackers/_dummy.py
|
tsingqguo/ABA
|
c32edbbe5705b0332a08951b5ee436b5f58c2e70
|
[
"MIT"
] | 12
|
2021-07-27T07:18:24.000Z
|
2022-03-09T13:52:20.000Z
|
utils/neuron/models/trackers/_dummy.py
|
tsingqguo/ABA
|
c32edbbe5705b0332a08951b5ee436b5f58c2e70
|
[
"MIT"
] | 2
|
2021-08-03T09:21:33.000Z
|
2021-12-29T14:25:30.000Z
|
utils/neuron/models/trackers/_dummy.py
|
tsingqguo/ABA
|
c32edbbe5705b0332a08951b5ee436b5f58c2e70
|
[
"MIT"
] | 3
|
2021-11-18T14:46:40.000Z
|
2022-01-03T15:47:23.000Z
|
from neuron.config import registry
from .tracker import Tracker, OxUvA_Tracker
__all__ = ['DummyTracker', 'DummyOxUvA_Tracker']
@registry.register_module
class DummyTracker(Tracker):
def __init__(self):
super(DummyTracker, self).__init__(
name='Dummy', is_deterministic=True, input_type='file')
def init(self, img, init_bbox):
self.bbox = init_bbox
def update(self, img):
return self.bbox
@registry.register_module
class DummyOxUvA_Tracker(OxUvA_Tracker):
def __init__(self):
super(DummyOxUvA_Tracker, self).__init__(
name='Dummy', is_deterministic=True, input_type='file')
def init(self, img, init_bbox):
self.bbox = init_bbox
def update(self, img):
return self.bbox, 1.0, True
| 23.617647
| 67
| 0.667497
|
628d5769eee2209f07a0c3229948824d1607a213
| 289,961
|
py
|
Python
|
theano/tensor/opt.py
|
brandonwillard/Theano
|
f375a0e999b950a81824a003f685b0bfd1c4e405
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tensor/opt.py
|
brandonwillard/Theano
|
f375a0e999b950a81824a003f685b0bfd1c4e405
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tensor/opt.py
|
brandonwillard/Theano
|
f375a0e999b950a81824a003f685b0bfd1c4e405
|
[
"BSD-3-Clause"
] | 1
|
2020-08-15T17:09:10.000Z
|
2020-08-15T17:09:10.000Z
|
""" Tensor optimizations addressing the ops in basic.py."""
# TODO: intelligent merge for mul/add
# TODO: 0*x -> 0
import itertools
import logging
import operator
import sys
import time
import traceback
import warnings
from collections import defaultdict
from functools import reduce
import numpy as np
from six import StringIO
import theano
import theano.scalar.basic as ts
from theano import compile, config, gof # to register the optimizer built by this file
from theano.compile.ops import Shape, Shape_i
from theano.gof import (
Constant,
InconsistencyError,
LocalOptimizer,
OpRemove,
PatternSub,
TopoOptimizer,
Variable,
graph,
opt,
toolbox,
)
from theano.gof.op import Op
from theano.gof.opt import (
Optimizer,
copy_stack_trace,
in2out,
local_optimizer,
pre_constant_merge,
pre_greedy_local_optimizer,
)
from theano.gof.utils import MethodNotDefined, TestValueError
from theano.gradient import DisconnectedType
# Work-around for Python 3.6 issue that prevents `import theano.tensor as tt`
from theano.tensor import basic as tt
from theano.tensor.basic import (
Alloc,
AllocEmpty,
Dot,
Flatten,
Join,
NotScalarConstantError,
Rebroadcast,
Reshape,
ScalarFromTensor,
ShapeError,
Split,
TensorFromScalar,
Tile,
abs_,
add,
alloc,
erf,
erfc,
extract_constant,
fill,
get_scalar_constant_value,
int_div,
inv,
log,
log1p,
mul,
neg,
pow,
sub,
tensor_copy,
true_div,
)
from theano.tensor.elemwise import (
All,
Any,
CAReduce,
DimShuffle,
Elemwise,
Prod,
ProdWithoutZeros,
Sum,
)
from theano.tensor.sort import TopKOp
from theano.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
AdvancedSubtensor1,
IncSubtensor,
Subtensor,
advanced_inc_subtensor1,
advanced_subtensor,
advanced_subtensor1,
as_index_constant,
get_canonical_form_slice,
get_idx_list,
)
from theano.tensor.type import (
values_eq_approx_remove_inf,
values_eq_approx_remove_inf_nan,
values_eq_approx_remove_nan,
)
# import theano.tensor.basic as tt
_logger = logging.getLogger("theano.tensor.opt")
def _fill_chain(new_out, orig_inputs):
for i in orig_inputs:
new_out = fill(i, new_out)
return [new_out]
def encompasses_broadcastable(b1, b2):
"""
Parameters
----------
b1
The broadcastable attribute of a tensor type.
b2
The broadcastable attribute of a tensor type.
Returns
-------
bool
True if the broadcastable patterns b1 and b2 are such that b2 is
broadcasted to b1's shape and not the opposite.
"""
if len(b1) < len(b2):
return False
b1 = b1[-len(b2) :]
return not any(v1 and not v2 for v1, v2 in zip(b1, b2))
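# Illustration (not in the original module): with b1 = (False, False) and
# b2 = (False, True), b2 describes a type that is broadcastable (size 1) in
# its last dimension, so it can be broadcast up to b1's shape:
#     encompasses_broadcastable((False, False), (False, True))   # -> True
#     encompasses_broadcastable((False, True), (False, False))   # -> False
#     encompasses_broadcastable((False,), (False, False))        # -> False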
def merge_broadcastables(broadcastables):
return [all(bcast) for bcast in zip(*broadcastables)]
def scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):
"""Partition a list of variables into two kinds:
scalar constants, and the rest."""
consts = []
origconsts = []
nonconsts = []
for i in inputs:
try:
v = get_scalar_constant_value(
i, elemwise=elemwise, only_process_constants=only_process_constants
)
consts.append(v)
origconsts.append(i)
except NotScalarConstantError:
nonconsts.append(i)
return consts, origconsts, nonconsts
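# Hedged illustration (assumes the `tt` alias for theano.tensor.basic above
# provides the usual `constant` and `vector` constructors):
#     x = tt.vector('x')
#     consts, origconsts, nonconsts = scalarconsts_rest([tt.constant(2.0), x])
#     # consts holds the extracted scalar 2.0, origconsts the constant
#     # variable itself, and nonconsts == [x]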
def broadcast_like(value, template, fgraph, dtype=None):
"""
Return a Variable with the same shape and dtype as the template,
filled by broadcasting value through it. `value` will be cast as
necessary.
"""
value = tt.as_tensor_variable(value)
if value.type == template.type:
return value
if template not in fgraph.variables:
raise NotImplementedError(
"broadcast_like currently requires the "
"template Variable to be in the fgraph already"
)
if dtype is None:
dtype = template.dtype
value = tt.cast(value, dtype)
if value.type == template.type:
return value
if hasattr(fgraph, "shape_feature"):
new_shape = fgraph.shape_feature.shape_of[template]
else:
new_shape = template.shape
rval = alloc(value, *new_shape)
# the template may have 1s in its shape without being broadcastable
if rval.broadcastable != template.broadcastable:
rval = tt.unbroadcast(
rval,
*[
i
for i in range(rval.ndim)
if rval.broadcastable[i] and not template.broadcastable[i]
],
)
assert rval.type.dtype == dtype
if rval.type.broadcastable != template.broadcastable:
raise AssertionError(
"rval.type.broadcastable is "
+ str(rval.type.broadcastable)
+ " but template.broadcastable is"
+ str(template.broadcastable)
)
return rval
class InplaceElemwiseOptimizer(Optimizer):
"""
We parametrise it to make it work for Elemwise and GpuElemwise op.
"""
def __init__(self, OP):
self.op = OP
def add_requirements(self, fgraph):
fgraph.attach_feature(gof.destroyhandler.DestroyHandler())
@staticmethod
def print_profile(stream, prof, level=0):
blanc = " " * level
print(blanc, "InplaceElemwiseOptimizer ", prof["opt"].op, file=stream)
for k in [
"node_before",
"nb_call_replace",
"nb_call_validate",
"nb_inconsistent",
]:
print(blanc, k, prof[k], file=stream)
ndim = prof["ndim"]
if ndim:
print(blanc, "ndim", "nb", file=stream)
for n in sorted(ndim.keys()):
print(blanc, n, ndim[n], file=stream)
def apply(self, fgraph):
"""
Usage: InplaceElemwiseOptimizer(op).optimize(fgraph)
Attempts to replace all Broadcast ops by versions of them
that operate inplace. It operates greedily: for each Broadcast
Op that is encountered, for each output, tries each input to
see if it can operate inplace on that input. If so, makes the
        change and moves on to the next output or Broadcast Op.
Examples
--------
`x + y + z -> x += y += z`
`(x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)`
"""
# We should not validate too often as this takes too much time to
# execute!
# It is the _dfs_toposort() fct in theano/gof/destroyhandler.py
# that takes so much time.
# Should we try to use another lib that does toposort?
# igraph: http://igraph.sourceforge.net/
# networkx: https://networkx.lanl.gov/
# Should we try to use cython?
# Compiling only that fct is not enough, should we try to add the
# deque class too?
# And init the deque and other list to an upper bound number of
# elements?
# Maybe Theano should do online toposort as in
# http://code.google.com/p/acyclic
#
# The next longest optimizer is the canonizer phase.
# Then I think it is the [io_?]toposort (need to validate) so check if
# the solution is also applicable there.
        # We execute `validate` after this number of changes.
prof = {
"opt": self,
"node_before": len(fgraph.apply_nodes),
"nb_call_replace": 0,
"nb_call_validate": 0,
"nb_inconsistent": 0,
"ndim": defaultdict(lambda: 0),
}
check_each_change = config.tensor.insert_inplace_optimizer_validate_nb
if check_each_change == -1:
if len(fgraph.apply_nodes) > 500:
check_each_change = 10
else:
check_each_change = 1
nb_change_no_validate = 0
chk = fgraph.checkpoint()
if fgraph.update_mapping:
update_outs = [fgraph.outputs[i] for i in fgraph.update_mapping]
else:
update_outs = []
protected_inputs = [
f.protected
for f in fgraph._features
if isinstance(f, theano.compile.function_module.Supervisor)
]
protected_inputs = sum(protected_inputs, []) # flatten the list
protected_inputs.extend(fgraph.outputs)
for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):
op = node.op
# gpuarray GpuElemwise inherit from Elemwise
if not type(op) == self.op:
continue
            # If the graph is big and the outputs are scalars, do not make
            # the op inplace.
if (
check_each_change != 1
and
# If multiple outputs, they must all have the same size,
# so only check the first.
getattr(node.outputs[0].type, "ndim", -1) == 0
):
continue
if op.inplace_pattern:
                # Maybe this isn't needed anymore, but I don't want to
                # risk a regression now. This case only happens if the
                # original node already has some inplace pattern and we
                # still try to add more patterns.
baseline = op.inplace_pattern
candidate_outputs = [
i for i in range(len(node.outputs)) if i not in baseline
]
# node inputs that are Constant, already destroyed,
# or fgraph protected inputs and fgraph outputs can't be used as
# inplace target.
# Remove here as faster.
candidate_inputs = [
i
for i in range(len(node.inputs))
if i not in baseline.values()
and not isinstance(node.inputs[i], Constant)
and
# the next line should not be costly most of the time.
not fgraph.has_destroyers([node.inputs[i]])
and node.inputs[i] not in protected_inputs
]
else:
baseline = []
candidate_outputs = list(range(len(node.outputs)))
# node inputs that are Constant, already destroyed,
# fgraph protected inputs and fgraph outputs can't be used as inplace
# target.
# Remove here as faster.
candidate_inputs = [
i
for i in range(len(node.inputs))
if not isinstance(node.inputs[i], Constant)
and not fgraph.has_destroyers([node.inputs[i]])
and node.inputs[i] not in protected_inputs
]
verbose = False
raised_warning = not verbose
for candidate_output in candidate_outputs:
# If the output of the node can be established as an update
# output of the fgraph, visit the candidate_inputs in an order
# that will improve the chances of making the node operate
# inplace on the input it's meant to update
candidate_out_var = node.outputs[candidate_output]
sorted_candidate_inputs = candidate_inputs
if candidate_out_var in update_outs:
# The candidate output is an update. Sort the
# variables in candidate_inputs in the following order:
# - Vars corresponding to the actual updated input
                    #   (best case scenario is for the node that produces
# an update to operate inplace on the variable to
# update)
                    # - Vars computed inplace on the updated input (second
                    #   best scenario is for the node to work inplace on
# a variable obtained by a chain of inplace on the
# variable to update. In some cases, this will be
# equivalent to operating inplace on the variable to
# update)
# - Remaining variables
updated_inputs = []
for i, f_out in enumerate(fgraph.outputs):
if f_out is candidate_out_var and i in fgraph.update_mapping:
updated_inp_idx = fgraph.update_mapping[i]
updated_inputs.append(fgraph.inputs[updated_inp_idx])
updated_vars = []
vars_from_inplace = []
other_vars = []
for inp_idx in candidate_inputs:
inp = node.inputs[inp_idx]
if inp in updated_inputs:
# the candidate input is the actual updated input
updated_vars.append(inp_idx)
elif (
hasattr(fgraph, "destroy_handler")
and inp.owner
and any(
[
fgraph.destroy_handler.root_destroyer.get(
up_inp, None
)
is inp.owner
for up_inp in updated_inputs
]
)
):
# the candidate input is a variable computed
# inplace on the updated input via a sequence of
# one or more inplace operations
vars_from_inplace.append(inp_idx)
else:
other_vars.append(inp_idx)
sorted_candidate_inputs = (
updated_vars + vars_from_inplace + other_vars
)
for candidate_input in sorted_candidate_inputs:
# remove inputs that don't have the same dtype as the output
if (
node.inputs[candidate_input].type
!= node.outputs[candidate_output].type
):
continue
inplace_pattern = dict(baseline)
inplace_pattern[candidate_output] = candidate_input
try:
if hasattr(op.scalar_op, "make_new_inplace"):
new_scal = op.scalar_op.make_new_inplace(
ts.transfer_type(
*[
inplace_pattern.get(i, o.dtype)
for i, o in enumerate(node.outputs)
]
)
)
else:
new_scal = op.scalar_op.__class__(
ts.transfer_type(
*[
inplace_pattern.get(i, None)
for i in range(len(node.outputs))
]
)
)
new_outputs = self.op(new_scal, inplace_pattern)(
*node.inputs, **dict(return_list=True)
)
new_node = new_outputs[0].owner
for r, new_r in zip(node.outputs, new_outputs):
prof["nb_call_replace"] += 1
fgraph.replace(
r, new_r, reason="inplace_elemwise_optimizer"
)
nb_change_no_validate += 1
prof["ndim"][candidate_out_var.ndim] += 1
if nb_change_no_validate >= check_each_change:
prof["nb_call_validate"] += 1
fgraph.validate()
chk = fgraph.checkpoint()
nb_change_no_validate = 0
except (ValueError, InconsistencyError) as e:
prof["nb_inconsistent"] += 1
if check_each_change != 1 and not raised_warning:
print(
(
"Some inplace optimization was not "
"performed due to unexpected error:"
),
file=sys.stderr,
)
print(e, file=sys.stderr)
raised_warning = True
fgraph.revert(chk)
continue
candidate_inputs.remove(candidate_input)
node = new_node
baseline = inplace_pattern
break
if nb_change_no_validate > 0:
try:
fgraph.validate()
except Exception:
if not raised_warning:
print(
(
"Some inplace optimization was not "
"performed due to unexpected error"
),
file=sys.stderr,
)
fgraph.revert(chk)
return prof
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print(
"{}{} ({})".format((" " * level), self.__class__.__name__, self.op),
file=stream,
)
return inplace_elemwise_optimizer
inplace_elemwise_optimizer = InplaceElemwiseOptimizer(Elemwise)
compile.optdb.register(
"inplace_elemwise_opt",
inplace_elemwise_optimizer,
75,
"inplace_opt", # for historic reason
"inplace_elemwise_optimizer",
"fast_run",
"inplace",
)
def register_useless(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_useless(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop("name", None) or lopt.__name__
compile.mode.local_useless.register(
name, lopt, "last", "fast_run", *tags, **kwargs
)
return lopt
def register_canonicalize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_canonicalize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop("name", None) or lopt.__name__
compile.optdb["canonicalize"].register(name, lopt, "fast_run", *tags, **kwargs)
return lopt
def register_stabilize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_stabilize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop("name", None) or lopt.__name__
compile.optdb["stabilize"].register(name, lopt, "fast_run", *tags, **kwargs)
return lopt
def register_specialize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_specialize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop("name", None) or lopt.__name__
compile.optdb["specialize"].register(name, lopt, "fast_run", *tags, **kwargs)
return lopt
def register_uncanonicalize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_uncanonicalize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = (kwargs and kwargs.pop("name", None)) or lopt.__name__
compile.optdb["uncanonicalize"].register(
name, lopt, "fast_run", *tags, **kwargs
)
return lopt
def register_specialize_device(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_specialize_device(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = (kwargs and kwargs.pop("name", None)) or lopt.__name__
compile.optdb["specialize_device"].register(
name, lopt, "fast_run", *tags, **kwargs
)
return lopt
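# The helpers above are normally used as decorators. A hypothetical,
# never-registered usage sketch (the inner optimization below is not part
# of Theano and does nothing):
#
#     @register_canonicalize("fast_compile")
#     @local_optimizer([add])
#     def local_noop_example(node):
#         # Returning False tells the optimizer this rewrite does not apply.
#         return False
#
# Passing a string first (here "fast_compile") only adds an extra tag; the
# decorator chain then registers the optimization in the corresponding
# optimization database under its function name.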
#####################
# Dot optimizations #
#####################
@register_canonicalize
@register_stabilize
@local_optimizer([Dot])
def local_0_dot_x(node):
if not isinstance(node.op, Dot):
return False
x = node.inputs[0]
y = node.inputs[1]
replace = False
try:
if get_scalar_constant_value(x, only_process_constants=True) == 0:
replace = True
except NotScalarConstantError:
pass
try:
if get_scalar_constant_value(y, only_process_constants=True) == 0:
replace = True
except NotScalarConstantError:
pass
if replace:
constant_zero = tt.constant(0, dtype=node.outputs[0].type.dtype)
if x.ndim == 2 and y.ndim == 2:
constant_zero = assert_(constant_zero, tt.eq(x.shape[1], y.shape[0]))
return [alloc(constant_zero, x.shape[0], y.shape[1])]
elif x.ndim == 1 and y.ndim == 2:
constant_zero = assert_(constant_zero, tt.eq(x.shape[0], y.shape[0]))
return [alloc(constant_zero, y.shape[1])]
elif x.ndim == 2 and y.ndim == 1:
constant_zero = assert_(constant_zero, tt.eq(x.shape[1], y.shape[0]))
return [alloc(constant_zero, x.shape[0])]
elif x.ndim == 1 and y.ndim == 1:
constant_zero = assert_(constant_zero, tt.eq(x.shape[0], y.shape[0]))
return [constant_zero]
else:
_logger.warning(
"Optimization Warning: "
"Optimization theano/opt.py:local_0_dot_x Found "
"that it could apply, but was not implemented "
"for dot product with these input types:\n"
"(%s, %s)",
x.type,
y.type,
)
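# A never-called sketch (illustrative only) of the graphs this rewrite
# targets: a dot product with a constant-zero operand.
def _example_local_0_dot_x():
    import numpy as np
    import theano
    import theano.tensor as tt

    x = tt.matrix("x")
    zero = tt.constant(np.zeros((3, 4), dtype=theano.config.floatX))
    f = theano.function([x], tt.dot(zero, x))
    # With the default (fast_run) optimizations, the Dot on the zero
    # operand should end up replaced by an Alloc of 0 of the right shape.
    theano.printing.debugprint(f)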
######################
# DimShuffle lifters #
######################
def apply_local_dimshuffle_lift(var):
# return var
# lift recursively
if not var.owner:
return var
new = local_dimshuffle_lift.transform(var.owner)
if new:
return new[0]
return var
# Checks for two types of useless dimshuffles:
# 1 - dimshuffle all dimensions in order.
# 2 - dimshuffle a broadcastable dimension.
def is_dimshuffle_useless(new_order, input):
is_useless = True
if len(new_order) == input.type.ndim:
all_broadcastable_dims = [
i
for (i, is_broadcastable) in enumerate(input.type.broadcastable)
if is_broadcastable
] + ["x"]
for i in range(input.type.ndim):
if new_order[i] == i or (
i in all_broadcastable_dims and new_order[i] in all_broadcastable_dims
):
is_useless = True
else:
is_useless = False
break
else:
is_useless = False
return is_useless
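# A never-called sketch (illustrative only) of the two cases the predicate
# above treats as useless dimshuffles.
def _example_is_dimshuffle_useless():
    import theano.tensor as tt

    m = tt.matrix("m")  # broadcastable == (False, False)
    r = tt.row("r")     # broadcastable == (True, False)
    # Case 1: identity ordering.
    assert is_dimshuffle_useless((0, 1), m)
    # Case 2: only a broadcastable dimension is shuffled (or replaced by 'x').
    assert is_dimshuffle_useless(("x", 1), r)
    # Reordering real (non-broadcastable) dimensions is not useless.
    assert not is_dimshuffle_useless((1, 0), m)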
@local_optimizer([DimShuffle])
def local_dimshuffle_lift(node):
"""
"Lifts" DimShuffle through Elemwise operations and merges
consecutive DimShuffles. Basically, applies the following
transformations on the whole graph:
DimShuffle(Elemwise(x, y)) => Elemwise(DimShuffle(x), DimShuffle(y))
DimShuffle(DimShuffle(x)) => DimShuffle(x)
    DimShuffle{0,1,...}(x) => x (when the dimshuffle does nothing)
After this transform, clusters of Elemwise operations are
void of DimShuffle operations.
"""
op = node.op
if not isinstance(op, DimShuffle):
return False
input = node.inputs[0]
inode = input.owner
new_order = op.new_order
if inode and isinstance(inode.op, Elemwise) and (len(input.clients) == 1):
# Don't use make_node to have tag.test_value set.
new_inputs = []
for inp in inode.inputs:
new_inp = op.__class__(inp.type.broadcastable, op.new_order)(inp)
new_inputs.append(apply_local_dimshuffle_lift(new_inp))
copy_stack_trace(node.outputs[0], new_inputs)
ret = inode.op(*new_inputs, **dict(return_list=True))
return ret
if inode and isinstance(inode.op, DimShuffle):
new_order = [x == "x" and "x" or inode.op.new_order[x] for x in new_order]
input = inode.inputs[0]
if is_dimshuffle_useless(new_order, input):
return [input]
elif inode and isinstance(inode.op, DimShuffle):
ret = op.__class__(input.type.broadcastable, new_order)(input)
ret = apply_local_dimshuffle_lift(ret)
copy_stack_trace(node.outputs[0], ret)
return [ret]
@register_canonicalize
@local_optimizer([Reshape])
def local_useless_dimshuffle_in_reshape(node):
"""
Removes useless DimShuffle operation inside Reshape:
reshape(vector.dimshuffle('x', 0), shp) => reshape(vector, shp)
reshape(matrix.dimshuffle('x', 0, 'x', 1), shp) => reshape(matrix, shp)
reshape(row.dimshuffle(1, 'x'), shp) => reshape(row, shp)
reshape(col.dimshuffle(0), shp) => reshape(col, shp)
"""
op = node.op
if not isinstance(op, Reshape):
return False
if not (
node.inputs[0].owner is not None
and isinstance(node.inputs[0].owner.op, DimShuffle)
):
return False
new_order = node.inputs[0].owner.op.new_order
input = node.inputs[0].owner.inputs[0]
broadcastables = node.inputs[0].broadcastable
new_order_of_nonbroadcast = []
for i, bd in zip(new_order, broadcastables):
if not bd:
new_order_of_nonbroadcast.append(i)
no_change_in_order = all(
new_order_of_nonbroadcast[i] <= new_order_of_nonbroadcast[i + 1]
for i in range(len(new_order_of_nonbroadcast) - 1)
)
if no_change_in_order:
shape = node.inputs[1]
ret = op.__class__(node.outputs[0].ndim)(input, shape)
copy_stack_trace(node.outputs[0], ret)
return [ret]
@register_canonicalize
@local_optimizer([DimShuffle])
def local_lift_transpose_through_dot(node):
"""
dot(x,y).T -> dot(y.T, x.T)
These optimizations "lift" (propagate towards the inputs) DimShuffle
    through dot products. It allows one to put the graph in a more standard shape,
and to later merge consecutive DimShuffles.
    The transformation should be applied whether or not the transpose is
    inplace. The newly-introduced transpositions are not inplace; this will
be taken care of in a later optimization phase.
"""
if not (isinstance(node.op, tt.DimShuffle) and node.op.new_order == (1, 0)):
return False
if not (node.inputs[0].owner and isinstance(node.inputs[0].owner.op, Dot)):
return False
x, y = node.inputs[0].owner.inputs
if x.ndim == y.ndim == 2:
# Output is dot product of transposed inputs in reverse order
ret = [tt.dot(y.T, x.T)]
# Copy over stack trace to output from result of dot-product
copy_stack_trace(node.inputs[0], ret)
return ret
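# A never-called sketch (illustrative only) of the rewrite above: the
# transpose of a matrix-matrix dot becomes a dot of the transposes.
def _example_local_lift_transpose_through_dot():
    import theano
    import theano.tensor as tt

    x = tt.matrix("x")
    y = tt.matrix("y")
    f = theano.function([x, y], tt.dot(x, y).T)
    # After canonicalization the compiled graph should compute
    # dot(y.T, x.T) instead of transposing the result of dot(x, y).
    theano.printing.debugprint(f)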
register_canonicalize(local_dimshuffle_lift)
register_specialize(local_dimshuffle_lift)
######################
# Casting operations #
######################
@register_canonicalize
@register_specialize
@local_optimizer([TensorFromScalar])
def local_tensor_scalar_tensor(node):
"""tensor_from_scalar(scalar_from_tensor(x)) -> x"""
if isinstance(node.op, TensorFromScalar):
s = node.inputs[0]
if s.owner and isinstance(s.owner.op, ScalarFromTensor):
t = s.owner.inputs[0]
# We don't need to copy over any stack traces here
return [t]
@register_canonicalize
@register_specialize
@local_optimizer([ScalarFromTensor])
def local_scalar_tensor_scalar(node):
"""scalar_from_tensor(tensor_from_scalar(x)) -> x"""
if isinstance(node.op, ScalarFromTensor):
t = node.inputs[0]
if t.owner and isinstance(t.owner.op, TensorFromScalar):
s = t.owner.inputs[0]
# We don't need to copy over any stack traces here
return [s]
#####################################
# ShapeFeature, Shape optimizations
#####################################
class MakeVector(Op):
"""Concatenate a number of scalars together into a vector.
This is a simple version of stack() that introduces far less cruft
into the graph. Should work with 0 inputs. The constant_folding
optimization will remove it.
"""
__props__ = ("dtype",)
def __init__(self, dtype="int64"):
self.dtype = dtype
def make_node(self, *inputs):
inputs = list(map(tt.as_tensor_variable, inputs))
if not all(a.type == inputs[0].type for a in inputs) or (
len(inputs) > 0 and inputs[0].dtype != self.dtype
):
dtype = ts.upcast(self.dtype, *[i.dtype for i in inputs])
# upcast the input to the determined dtype,
# but don't downcast anything
assert dtype == self.dtype, (
"The upcast of the inputs to MakeVector should match the "
"dtype given in __init__."
)
if not all(self.dtype == tt.cast(i, dtype=dtype).dtype for i in inputs):
raise TypeError(
"MakeVector.make_node expected inputs"
" upcastable to %s. got %s"
% (self.dtype, str([i.dtype for i in inputs]))
)
inputs = [tt.cast(i, dtype=dtype) for i in inputs]
assert all(self.dtype == a.dtype for a in inputs)
assert all(a.ndim == 0 for a in inputs)
if inputs:
dtype = inputs[0].type.dtype
else:
dtype = self.dtype
# bcastable = (len(inputs) == 1)
bcastable = False
otype = tt.TensorType(broadcastable=(bcastable,), dtype=dtype)
return tt.Apply(self, inputs, [otype()])
def perform(self, node, inputs, out_):
(out,) = out_
# not calling theano._asarray as optimization
if (out[0] is None) or (out[0].size != len(inputs)):
out[0] = theano._asarray(inputs, dtype=node.outputs[0].dtype)
else:
# assume that out has correct dtype. there is no cheap way to check
out[0][...] = inputs
def c_code_cache_version(self):
return (2,)
def c_code(self, node, name, inp, out_, props):
(out,) = out_
        # Shouldn't use PyArray_TYPE(inp[0]) for the dtype
        # when len(inp) == 0 (we need to support this case).
        # So there will be (1 * nb_dtype) + (nb len(inp) - 1)
        # different C code versions, generated with the following algorithm.
out_shape = len(inp)
out_num = np.dtype(node.outputs[0].dtype).num
# don't use dtype_%(out)s as when check_input=False, it isn't defined.
out_dtype = node.outputs[0].type.dtype_specs()[1]
if len(inp) > 0:
assert self.dtype == node.inputs[0].dtype
out_num = "PyArray_TYPE(%s)" % inp[0]
ret = (
"""
npy_intp dims[1];
dims[0] = %(out_shape)s;
if(!%(out)s || PyArray_DIMS(%(out)s)[0] != %(out_shape)s){
Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject*)PyArray_EMPTY(1, dims, %(out_num)s, 0);
}
"""
% locals()
)
for idx, i in enumerate(inp):
ret += (
"""
*((%(out_dtype)s *)PyArray_GETPTR1(%(out)s, %(idx)s)) = *((%(out_dtype)s *) PyArray_DATA(%(i)s));
"""
% locals()
)
return ret
def infer_shape(self, node, ishapes):
return [(len(ishapes),)]
def grad(self, inputs, output_gradients):
# If the output is of an integer dtype, no gradient shall pass
if self.dtype in tt.discrete_dtypes:
return [ipt.zeros_like().astype(theano.config.floatX) for ipt in inputs]
grads = []
for i, inp in enumerate(inputs):
grads.append(output_gradients[0][i])
return grads
def R_op(self, inputs, eval_points):
if None in eval_points:
return [None]
return self.make_node(*eval_points).outputs
make_vector = MakeVector()
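# A never-called sketch (illustrative only) of MakeVector: it packs a few
# scalars -- here the entries of a symbolic shape -- into one int64 vector.
def _example_make_vector():
    import theano
    import theano.tensor as tt

    x = tt.matrix("x")
    v = make_vector(x.shape[0], x.shape[1])
    f = theano.function([x], v)
    # Calling f returns a length-2 int64 vector holding x's two dimensions.
    return f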
class MakeVectorPrinter:
def process(self, r, pstate):
if r.owner is None:
raise TypeError("Can only print make_vector.")
elif isinstance(r.owner.op, MakeVector):
old_precedence = getattr(pstate, "precedence", None)
try:
pstate.precedence = 1000
s = [pstate.pprinter.process(input) for input in r.owner.inputs]
finally:
pstate.precedence = old_precedence
return "[%s]" % ", ".join(s)
else:
raise TypeError("Can only print make_vector.")
tt.pprint.assign(MakeVector, MakeVectorPrinter())
class ShapeFeature:
"""Graph optimizer for removing all calls to shape().
This optimizer replaces all Shapes and Subtensors of Shapes with
Shape_i and MakeVector Ops.
This optimizer has several goals:
1. to 'lift' Shapes to as close to the inputs as possible.
2. to infer the shape of every node in the graph in terms of the
input shapes.
3. remove all fills (T.second, T.fill) from the graph
Lifting shapes as close to the inputs as possible is important for
canonicalization because it is very bad form to have to compute
something just to know how big it will be. Firstly, it is a waste
of time to compute such outputs. But it is important to get rid
of these outputs as early as possible in the compilation process
because the extra computations make it appear as if many internal
graph nodes have multiple clients. Many optimizations refuse to
work on nodes with multiple clients.
Lifting is done by using an `<Op>.infer_shape` function if one is
present, or else using a conservative default. An Op that
    supports shape-lifting should define an infer_shape(self, node,
input_shapes) function. The argument input_shapes is a tuple of
tuples... there is an interior tuple for each input to the node.
The tuple has as many elements as dimensions. The element in
position i of tuple j represents the i'th shape component of the
j'th input. The function should return a tuple of tuples. One
output tuple for each node.output. Again, the i'th element of the
j'th output tuple represents the output[j].shape[i] of the
function. If an output is not a TensorType, then None should be
returned instead of a tuple for that output.
For example the infer_shape for a matrix-matrix product would accept
input_shapes=((x0,x1), (y0,y1)) and return ((x0, y1),).
Inferring the shape of internal nodes in the graph is important
for doing size-driven optimizations. If we know how big various
intermediate results will be, we can estimate the cost of many Ops
accurately, and generate c-code that is specific [e.g. unrolled]
to particular sizes.
In cases where you cannot figure out the shape, raise a ShapeError.
Notes
-----
Right now there is only the ConvOp that could really take
advantage of this shape inference, but it is worth it even
just for the ConvOp. All that's necessary to do shape
inference is 1) to mark shared inputs as having a particular
shape, either via a .tag or some similar hacking; and 2) to
add an optional In() argument to promise that inputs will
have a certain shape (or even to have certain shapes in
certain dimensions). We can't automatically infer the shape of
    shared variables, as they can change shape during execution
    by default. (NOT IMPLEMENTED YET, BUT IS IN TRAC)
**Using Shape information in Optimizations**
To use this shape information in OPTIMIZATIONS, use the
``shape_of`` dictionary.
For example:
.. code-block:: python
try:
shape_of = node.fgraph.shape_feature.shape_of
except AttributeError:
# This can happen when the mode doesn't include the ShapeFeature.
return
shape_of_output_zero = shape_of[node.output[0]]
The ``shape_of_output_zero`` symbol will contain a tuple, whose
elements are either integers or symbolic integers.
TODO: check to see if the symbols are necessarily
non-constant... or are integer literals sometimes Theano
constants?? That would be confusing.
"""
def get_node_infer_shape(self, node):
try:
shape_infer = node.op.infer_shape
except AttributeError:
shape_infer = self.default_infer_shape
try:
o_shapes = shape_infer(node, [self.shape_of[r] for r in node.inputs])
except ShapeError:
o_shapes = self.default_infer_shape(
node, [self.shape_of[r] for r in node.inputs]
)
except NotImplementedError as e:
raise NotImplementedError(
"Code called by infer_shape failed raising a "
"NotImplementedError. Raising NotImplementedError to "
"indicate that a shape cannot be computed is no longer "
"supported, and one should now use tensor.ShapeError "
"instead. The original exception message is: %s" % e
).with_traceback(e.__traceback__)
except Exception as e:
msg = (
"Failed to infer_shape from Op %s.\nInput shapes: "
"%s\nException encountered during infer_shape: "
"%s\nException message: %s\nTraceback: %s"
) % (
node.op,
[self.shape_of[r] for r in node.inputs],
type(e),
str(e),
traceback.format_exc(),
)
if config.on_shape_error == "raise":
raise Exception(msg).with_traceback(e.__traceback__)
else:
_logger.warning(msg)
o_shapes = self.default_infer_shape(
node, [self.shape_of[r] for r in node.inputs]
)
return o_shapes
def get_shape(self, var, idx):
"""Optimization can call this to get the current shape_i
        It is better to call this than to use shape_of[var][idx] directly,
        as this method updates shape_of if needed.
TODO: Up to now, we don't update it in all cases. Update in all cases.
"""
r = self.shape_of[var][idx]
if (
r.owner
and isinstance(r.owner.op, Shape_i)
and r.owner.inputs[0] not in var.fgraph.variables
):
assert var.owner
node = var.owner
# recur on inputs
for i in node.inputs:
if getattr(i, "ndim", None) > 0:
self.get_shape(i, 0)
o_shapes = self.get_node_infer_shape(node)
assert len(o_shapes) == len(node.outputs)
# Only change the variables and dimensions that would introduce
# extra computation
for new_shps, out in zip(o_shapes, node.outputs):
if not hasattr(out, "ndim"):
continue
merged_shps = list(self.shape_of[out])
changed = False
for i in range(out.ndim):
n_r = merged_shps[i]
if (
n_r.owner
and isinstance(n_r.owner.op, Shape_i)
and n_r.owner.inputs[0] not in var.fgraph.variables
):
changed = True
merged_shps[i] = new_shps[i]
if changed:
self.set_shape(out, merged_shps, override=True)
r = self.shape_of[var][idx]
return r
def shape_ir(self, i, r):
"""Return symbolic r.shape[i] for tensor variable r, int i."""
if hasattr(r.type, "broadcastable") and r.type.broadcastable[i]:
return self.lscalar_one
else:
# Do not call make_node for test_value
s = Shape_i(i)(r)
try:
s = get_scalar_constant_value(s)
except NotScalarConstantError:
pass
return s
def shape_tuple(self, r):
"""Return a tuple of symbolic shape vars for tensor variable r."""
if not hasattr(r, "ndim"):
            # This happens for NoneConst.
return None
return tuple([self.shape_ir(i, r) for i in range(r.ndim)])
def default_infer_shape(self, node, i_shapes):
"""Return a list of shape tuple or None for the outputs of node.
This function is used for Ops that don't implement infer_shape.
Ops that do implement infer_shape should use the i_shapes parameter,
but this default implementation ignores it.
"""
rval = []
for r in node.outputs:
try:
rval.append(self.shape_tuple(r))
except AttributeError:
rval.append(None)
return rval
def unpack(self, s_i, var):
"""Return a symbolic integer scalar for the shape element s_i.
The s_i argument was produced by the infer_shape() of an Op subclass.
        var: the variable that corresponds to s_i. This is just for
error reporting.
"""
# unpack the s_i that the Op returned
assert s_i is not None
if s_i == 1:
            # always return the same object to represent 1, so that the
            # optimizer doesn't have to merge a zillion separate ones
return self.lscalar_one
if isinstance(s_i, float) and int(s_i) == s_i:
s_i = int(s_i)
if isinstance(s_i, (np.integer, int)) or (
isinstance(s_i, np.ndarray) and s_i.ndim == 0
):
# this shape is a constant
if s_i < 0:
msg = "There is a negative shape in the graph!"
msg += gof.utils.get_variable_trace_string(var)
                # The rest of the pipeline doesn't handle this case
                # correctly. So we have two choices: stop compilation, or
                # consider the shape as unknown. As we have a better
                # chance of giving a useful stack trace here than later,
                # I choose to stop compilation, as it gives a better
                # error message.
raise AssertionError(msg)
return tt.constant(s_i, dtype="int64")
if isinstance(s_i, (tuple, list)):
# this dimension is the same as many of the inputs
# which tells us that if one of the inputs is known,
# the others all become known.
# TODO: should be implemented in Elemwise, and Dot
#
# worst case, we loop over shape_of and replace things
raise NotImplementedError(s_i)
# s_i is x.shape[i] for some x, we change it to shape_of[x][i]
if (
s_i.owner
and isinstance(s_i.owner.op, Subtensor)
and s_i.owner.inputs[0].owner
and isinstance(s_i.owner.inputs[0].owner.op, Shape)
):
assert s_i.ndim == 0
assert len(s_i.owner.op.idx_list) == 1
            # The current Subtensor always puts constant indices in the graph.
# This was not True in the past. So call the Subtensor function
# that will return the right index.
idx = get_idx_list(s_i.owner.inputs, s_i.owner.op.idx_list)
assert len(idx) == 1
idx = idx[0]
try:
i = get_scalar_constant_value(idx)
except NotScalarConstantError:
pass
else:
# Executed only if no exception was raised
x = s_i.owner.inputs[0].owner.inputs[0]
# x should already have been imported, and should be in shape_of.
s_i = self.shape_of[x][i]
if s_i.type.dtype in tt.integer_dtypes:
if getattr(s_i.type, "ndim", 0):
raise TypeError("Shape element must be scalar", s_i)
return s_i
else:
raise TypeError(
"Unsupported shape element", s_i, type(s_i), getattr(s_i, "type", None)
)
def set_shape(self, r, s, override=False):
"""Assign the shape `s` to previously un-shaped variable `r`.
Parameters
----------
r : a variable
s : None or a tuple of symbolic integers
        override : If False, it means r is a new object in the fgraph.
            If True, it means r is already in the fgraph and we want to
override its shape.
"""
if not override:
assert r not in self.shape_of, "r already in shape_of"
if s is None:
self.shape_of[r] = s
else:
if not isinstance(s, (tuple, list)):
raise TypeError("shapes must be tuple/list", (r, s))
if r.ndim != len(s):
sio = StringIO()
theano.printing.debugprint(r, file=sio, print_type=True)
raise AssertionError(
"Something inferred a shape with %d dimensions "
"for a variable with %d dimensions"
" for the variable:\n%s" % (len(s), r.ndim, sio.getvalue())
)
shape_vars = []
for i in range(r.ndim):
if hasattr(r.type, "broadcastable") and r.type.broadcastable[i]:
shape_vars.append(self.lscalar_one)
else:
shape_vars.append(self.unpack(s[i], r))
assert all(
[
not hasattr(r.type, "broadcastable")
or not r.type.broadcastable[i]
or
                # The two following comparisons are a speed optimization,
                # but we never timed this speed optimization!
self.lscalar_one.equals(shape_vars[i])
or self.lscalar_one.equals(tt.extract_constant(shape_vars[i]))
for i in range(r.ndim)
]
)
self.shape_of[r] = tuple(shape_vars)
for sv in shape_vars:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def update_shape(self, r, other_r):
"""Replace shape of r by shape of other_r.
If, on some dimensions, the shape of other_r is not informative,
keep the shape of r on those dimensions.
"""
# other_r should already have a shape
assert other_r in self.shape_of, ("other_r not in shape_of", other_r)
other_shape = self.shape_of[other_r]
        # If other_shape has no information, the call is pointless.
if other_shape is None:
return
if r in self.shape_of:
r_shape = self.shape_of[r]
else:
# If no info is known on r's shape, use other_shape
self.set_shape(r, other_shape)
return
if (
other_r.owner
and r.owner
and other_r.owner.inputs == r.owner.inputs
and other_r.owner.op == r.owner.op
):
            # We are doing a merge, so the two shape graphs will be the
# same. This is only a speed optimization to call
# ancestors() less frequently.
return
# Merge other_shape with r_shape, giving the priority to other_shape
merged_shape = []
for i, ps in enumerate(other_shape):
if r_shape is None and other_shape:
merged_shape.append(other_shape[i])
elif (
ps.owner
and isinstance(getattr(ps.owner, "op", None), Shape_i)
and ps.owner.op.i == i
and ps.owner.inputs[0] in (r, other_r)
):
# If other_shape[i] is uninformative, use r_shape[i].
# For now, we consider 2 cases of uninformative other_shape[i]:
# - Shape_i(i)(other_r);
# - Shape_i(i)(r).
merged_shape.append(r_shape[i])
elif isinstance(r_shape[i], (Constant, int)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(r_shape[i])
elif isinstance(other_shape[i], (Constant, int)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(other_shape[i])
elif other_shape[i] == r_shape[i]:
                # This means the shapes are equivalent
# We do not want to do the ancestor check in those cases
merged_shape.append(r_shape[i])
elif r_shape[i] in gof.graph.ancestors([other_shape[i]]):
# Another case where we want to use r_shape[i] is when
# other_shape[i] actually depends on r_shape[i]. In that case,
# we do not want to substitute an expression with another that
# is strictly more complex. Such a substitution could also lead
# to cycles: if (in the future) r_shape[i] gets replaced by an
# expression of other_shape[i], other_shape[i] may end up
# depending on itself.
merged_shape.append(r_shape[i])
else:
merged_shape.append(other_shape[i])
assert all(
[
(
not hasattr(r.type, "broadcastable")
or not r.type.broadcastable[i]
and not other_r.type.broadcastable[i]
)
or
                # The two following comparisons are a speed optimization,
                # but we never timed this speed optimization!
self.lscalar_one.equals(merged_shape[i])
or self.lscalar_one.equals(
tt.extract_constant(merged_shape[i], only_process_constants=True)
)
for i in range(r.ndim)
]
)
self.shape_of[r] = tuple(merged_shape)
for sv in self.shape_of[r]:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def set_shape_i(self, r, i, s_i):
"""Replace element i of shape_of[r] by s_i"""
assert r in self.shape_of
prev_shape = self.shape_of[r]
# prev_shape is a tuple, so we cannot change it inplace,
# so we build another one.
new_shape = []
for j, s_j in enumerate(prev_shape):
if j == i:
new_shape.append(self.unpack(s_i, r))
else:
new_shape.append(s_j)
assert all(
[
not hasattr(r.type, "broadcastable") or not r.type.broadcastable[idx] or
                # The two following comparisons are a speed optimization,
                # but we never timed this speed optimization!
self.lscalar_one.equals(new_shape[idx])
or self.lscalar_one.equals(tt.extract_constant(new_shape[idx]))
for idx in range(r.ndim)
]
)
self.shape_of[r] = tuple(new_shape)
for sv in self.shape_of[r]:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def init_r(self, r):
"""Register r's shape in the shape_of dictionary."""
if r not in self.shape_of:
try:
self.set_shape(r, self.shape_tuple(r))
except AttributeError: # XXX: where would this come from?
self.set_shape(r, None)
def make_vector_shape(self, r):
return make_vector(*self.shape_of[r])
#
# Feature interface
#
#
def on_attach(self, fgraph):
assert not hasattr(fgraph, "shape_feature")
fgraph.shape_feature = self
# Must be local to the object as otherwise we reuse the same
        # variable for multiple fgraphs!
self.lscalar_one = tt.constant(1, dtype="int64")
assert self.lscalar_one.type == tt.lscalar
self.shape_of = {}
# Variable -> tuple(scalars) or None (All tensor vars map to tuple)
self.scheduled = {}
# Variable ->
self.shape_of_reverse_index = {}
# shape var -> graph v
for node in fgraph.toposort():
self.on_import(fgraph, node, reason="on_attach")
def on_detach(self, fgraph):
self.shape_of = {}
self.scheduled = {}
self.shape_of_reverse_index = {}
del fgraph.shape_feature
def on_import(self, fgraph, node, reason):
if node.outputs[0] in self.shape_of:
# this is a revert, not really an import
for r in node.outputs + node.inputs:
assert r in self.shape_of
return
for i, r in enumerate(node.inputs):
# make sure we have shapes for the inputs
self.init_r(r)
o_shapes = self.get_node_infer_shape(node)
# this is packed information
# an element of o_shapes is either None or a tuple
# elements of the tuple can be either strings, or ints
if len(o_shapes) != len(node.outputs):
raise Exception(
(
'The infer_shape method for the Op "%s" returned a list '
+ "with the wrong number of element: len(o_shapes) = %d "
+ " != len(node.outputs) = %d"
)
% (str(node.op), len(o_shapes), len(node.outputs))
)
# Ensure shapes are in 'int64'. This is to make sure the assert
# found in the `local_useless_subtensor` optimization does not fail.
for sh_idx, sh in enumerate(o_shapes):
if sh is None:
continue
if not isinstance(sh, (list, tuple)):
raise ValueError(
"infer_shape of %s didn't return a list of"
" list. It returned '%s'" % (str(node), str(o_shapes))
)
new_shape = []
for i, d in enumerate(sh):
# Note: we ignore any shape element that is not typed (i.e.,
# does not have a 'dtype' attribute). This means there may
# still remain int elements that are int32 on 32-bit platforms,
# but this works with `local_useless_subtensor`, so for now we
# keep it this way. See #266 for a better long-term fix.
if getattr(d, "dtype", "int64") != "int64":
assert d.dtype in tt.discrete_dtypes, (node, d.dtype)
assert str(d.dtype) != "uint64", node
new_shape += sh[len(new_shape) : i + 1]
if isinstance(d, tt.Constant):
casted_d = tt.constant(d.data, dtype="int64")
else:
casted_d = tt.cast(d, "int64")
new_shape[i] = casted_d
if new_shape:
# We replace the shape with wrong dtype by the one with
# 'int64'.
new_shape += sh[len(new_shape) :]
o_shapes[sh_idx] = tuple(new_shape)
for r, s in zip(node.outputs, o_shapes):
self.set_shape(r, s)
def on_change_input(self, fgraph, node, i, r, new_r, reason):
if new_r not in self.shape_of:
            # It can happen that the fgraph didn't call on_import for some
            # new_r. This happens when new_r doesn't have an
            # owner (i.e. it is a constant or an input of the graph).
            # update_shape assumes that r and new_r are in shape_of.
self.init_r(new_r)
# This tells us that r and new_r must have the same shape if
# we didn't know that the shapes are related, now we do.
self.update_shape(new_r, r)
# change_input happens in two cases:
# 1) we are trying to get rid of r, or
# 2) we are putting things back after a failed transaction.
# In case 1, if r has a shape_i client, we will want to
# replace the shape_i of r with the shape of new_r. Say that
# r is *scheduled*.
# At that point, node is no longer a client of r, but of new_r
for (shpnode, idx) in r.clients + [(node, i)]:
if isinstance(getattr(shpnode, "op", None), Shape_i):
idx = shpnode.op.i
repl = self.shape_of[new_r][idx]
if repl.owner is shpnode:
                    # This means the replacement shape object is
                    # exactly the same as the current shape object. So
                    # no replacement is needed. This happens for example
                    # with the InputToGpuOptimizer optimizer.
continue
if (
repl.owner
and repl.owner.inputs[0] is shpnode.inputs[0]
and isinstance(repl.owner.op, Shape_i)
and repl.owner.op.i == shpnode.op.i
):
# The replacement is a shape_i of the same
# input. So no need to do this equivalent
# replacement.
continue
if shpnode.outputs[0] in gof.graph.ancestors([repl]):
raise InconsistencyError(
"This substitution would insert a cycle in the graph:"
"node: %s, i: %i, r: %s, new_r: %s" % (node, i, r, new_r)
)
self.scheduled[shpnode] = new_r
# In case 2, if r is a variable that we've scheduled for shape update,
# then we should cancel it.
unscheduled = [k for k, v in self.scheduled.items() if v == r]
for k in unscheduled:
del self.scheduled[k]
# In either case, r could be in shape_of.values(), that is, r itself
# is the shape of something. In that case, we want to update
# the value in shape_of, to keep it up-to-date.
for v in self.shape_of_reverse_index.get(r, []):
# The reverse index is only approximate. It is not updated on
# deletion of variables, or on change_input so it might be the
# case that there are a few extra `v`'s in it that no longer have
# a shape of r or possibly have been deleted from shape_of
            # entirely. The important thing is that it lets us recall
# all variables with r in their shape.
for ii, svi in enumerate(self.shape_of.get(v, [])):
if svi == r:
self.set_shape_i(v, ii, new_r)
self.shape_of_reverse_index[r] = set()
def same_shape(self, x, y, dim_x=None, dim_y=None):
"""Return True if we are able to assert that x and y have the
same shape.
dim_x and dim_y are optional. If used, they should be an index
to compare only 1 dimension of x and y.
"""
sx = self.shape_of[x]
sy = self.shape_of[y]
if sx is None or sy is None:
return False
if dim_x is not None:
sx = [sx[dim_x]]
if dim_y is not None:
sy = [sy[dim_y]]
assert len(sx) == len(sy)
# We look on each dimensions we want to compare.
# If any of them can't be asserted to be equal, return False.
# Otherwise, we return True at the end.
for dx, dy in zip(sx, sy):
if dx is dy:
continue
# Need to try to find that they are the same shape. We
            # need to compare the full graph. It could be slow. So for
            # now, I just implement the case of Shape_i.
if not dx.owner or not dy.owner:
return False
if not isinstance(dx.owner.op, Shape_i) or not isinstance(
dy.owner.op, Shape_i
):
return False
opx = dx.owner.op
opy = dy.owner.op
if not (opx.i == opy.i):
return False
            # FB: I'm not sure if this handles constants correctly.
if dx.owner.inputs[0] == dy.owner.inputs[0]:
continue
# To be sure to cover all case, call equal_computation.
# Can't use theano.gof.graph.is_same_graph(dx, dy)
            # as it currently expects that dx and dy aren't in a FunctionGraph.
from theano.gof.graph import equal_computations
if not equal_computations([dx], [dy]):
return False
return True
class ShapeOptimizer(Optimizer):
"""Optimizer that serves to add ShapeFeature as an fgraph feature."""
def add_requirements(self, fgraph):
fgraph.attach_feature(ShapeFeature())
def apply(self, fgraph):
pass
class UnShapeOptimizer(Optimizer):
"""Optimizer remove ShapeFeature as an fgraph feature."""
def apply(self, fgraph):
for feature in fgraph._features:
if isinstance(feature, ShapeFeature):
fgraph.remove_feature(feature)
# Register it after merge1 optimization at 0. We don't want to track
# the shape of merged nodes.
theano.compile.mode.optdb.register(
"ShapeOpt", ShapeOptimizer(), 0.1, "fast_run", "fast_compile"
)
# Not enabled by default for now. Some crossentropy opts use the
# shape_feature. They are at step 2.01. uncanonicalize is at step
# 3. After that it goes to 48.5, which moves to the GPU. So 10 seems reasonable.
theano.compile.mode.optdb.register("UnShapeOpt", UnShapeOptimizer(), 10)
def local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):
def local_elemwise_alloc(node):
"""
elemwise(alloc(x, shp), ..., y.TensorType(BROADCAST CONDITION))
-> elemwise(x, y.TensorType(BROADCAST CONDITION))
elemwise(dimshuffle(alloc(x, shp)),... ,y.TensorType(BROADCAST CONDITION))
-> elemwise(x.dimshuffle(...), y.TensorType(BROADCAST CONDITION))
        BROADCAST CONDITION: the condition is that at least one input that is
        not to be optimized has the same broadcast pattern as the
        output.
        We can replace the alloc by a dimshuffle, as the elemwise
        already has the shape info. The dimshuffle will be faster
        to execute.
"""
if not isinstance(node.op, ElemwiseOP):
return False
if len(node.outputs) > 1:
# Ensure all outputs have the same broadcast pattern
# This is a supposition that I'm not sure is always true.
assert all(
[
o.type.broadcastable == node.outputs[0].type.broadcastable
for o in node.outputs[1:]
]
)
        # The broadcast pattern of the output must match the broadcast
# pattern of at least one of the inputs.
if not any(
[
i.type.broadcastable == node.outputs[0].type.broadcastable
for i in node.inputs
]
):
return False
def dimshuffled_alloc(i):
return (
isinstance(i.owner.op, DimShuffleOP)
and i.owner.inputs[0].owner
and isinstance(i.owner.inputs[0].owner.op, AllocOP)
)
        # At least one input must have an owner that is either an AllocOP or a
        # DimShuffleOP with an owner that is an AllocOP -- otherwise there is
# nothing to optimize.
if not any(
[
i.owner and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))
for i in node.inputs
]
):
return False
# Search for input that we can use as a baseline for the dimensions.
assert_op_idx = -1
for idx, i in enumerate(node.inputs):
if i.type.broadcastable == node.outputs[0].type.broadcastable:
                # Prefer an input that is not an AllocOP nor a DimShuffleOP of
                # an AllocOP so that all allocs can be optimized.
if not (
i.owner
and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))
):
assert_op_idx = idx
break
# It may be the case that only AllocOP and DimShuffleOP of AllocOP exist.
if assert_op_idx < 0:
            # We want to optimize as many allocs as possible. When
            # there is more than one, optimize all but one. Count the
            # inputs with an alloc or a dimshuffled alloc.
l2 = [
i
for i in node.inputs
if (
i.owner
and (isinstance(i.owner.op, AllocOP) or dimshuffled_alloc(i))
)
]
# If only 1 alloc or dimshuffle alloc, it is the one we
# will use for the shape. So no alloc would be removed.
if len(l2) > 1:
                # l contains inputs with an alloc or a dimshuffled alloc
# only. Its length will always be at least one, as we
# checked that before
l = [
idx
for idx, i in enumerate(node.inputs)
if i.broadcastable == node.outputs[0].broadcastable
]
assert_op_idx = l[0] # The first one is as good as any to use.
else:
# Nothing would be optimized!
return False
assert_op = node.inputs[assert_op_idx]
cmp_op = assert_op
new_i = []
same_shape = node.fgraph.shape_feature.same_shape
for i in node.inputs:
# Remove alloc
if (
i.owner
and isinstance(i.owner.op, AllocOP)
and i.owner.inputs[0].type != i.owner.outputs[0].type
):
# when i.owner.inputs[0].type == i.owner.outputs[0].type we
# will remove that alloc later
assert i.type.ndim == cmp_op.ndim
if theano.config.experimental.local_alloc_elemwise_assert:
get_shape = node.fgraph.shape_feature.get_shape
cond = []
for idx in range(i.type.ndim):
if not i.type.broadcastable[idx] and not same_shape(
i, cmp_op, idx, idx
):
i_shp = get_shape(i, idx)
cmp_shp = get_shape(cmp_op, idx)
cond.append(tt.eq(i_shp, cmp_shp))
if cond:
assert_op = assert_(assert_op, *cond)
new_i.append(i.owner.inputs[0])
# Remove Alloc in DimShuffle
elif i.owner and dimshuffled_alloc(i):
assert i.type.ndim == cmp_op.type.ndim
if theano.config.experimental.local_alloc_elemwise_assert:
assert_cond = [
tt.eq(i.shape[idx], cmp_op.shape[idx])
for idx in range(i.type.ndim)
if not i.type.broadcastable[idx]
and not same_shape(i, cmp_op, idx, idx)
]
if assert_cond:
assert_op = assert_(assert_op, *assert_cond)
alloc_input = i.owner.inputs[0].owner.inputs[0]
if alloc_input.ndim != i.owner.inputs[0].ndim:
                    # The alloc can add dimensions to the value.
                    # We add a dimshuffle to add them.
                    # We let later optimizations merge the multiple dimshuffles.
nb_dim_to_add = i.owner.inputs[0].ndim - alloc_input.ndim
alloc_input = alloc_input.dimshuffle(
["x"] * nb_dim_to_add + list(range(alloc_input.ndim))
)
# We need to keep the dimshuffle. It could swap axes or
# add dimensions anywhere.
r_i = i.owner.op(alloc_input)
# Copy stack trace from i to new_i
copy_stack_trace(i, r_i)
new_i.append(r_i)
else:
new_i.append(i)
new_i[assert_op_idx] = assert_op
ret = node.op(*new_i, return_list=True)
# Copy over stack trace from previous outputs to new outputs.
copy_stack_trace(node.outputs, ret)
return ret
return local_elemwise_alloc
# TODO, global optimizer that lifts the assert to the beginning of the graph.
# TODO, optimize all inputs when possible -- currently when all inputs have
# an alloc all but one is optimized.
local_elemwise_alloc = register_specialize(
local_optimizer([Elemwise])(
local_elemwise_alloc_op(Elemwise, Alloc, tt.DimShuffle)
),
"local_alloc_elemwise",
)
@local_optimizer([Elemwise])
def local_fill_sink(node):
"""
f(fill(a, b), fill(c, d), e) -> fill(c, fill(a, f(b, d, e)))
    f needs to be an elemwise that isn't a fill.
"""
if not hasattr(node, "op") or not isinstance(node.op, Elemwise) or node.op == fill:
return False
models = []
inputs = []
for input in node.inputs:
if input.owner and input.owner.op == fill:
models.append(input.owner.inputs[0])
inputs.append(input.owner.inputs[1])
else:
inputs.append(input)
if not models:
return False
c = node.op(*inputs)
for model in models:
if model.type != c.type:
c = fill(model, c)
    # The newly created node c doesn't have 'clients',
    # so this iteration takes place using node.outputs[0].
replacements = {node.outputs[0]: c}
for client, cl_idx in node.outputs[0].clients:
if (
hasattr(client, "op")
and isinstance(client.op, Elemwise)
and not client.op == fill
):
client_inputs = client.inputs[:]
client_inputs[cl_idx] = c
new_client = client.op(*client_inputs)
# Add clients to new_client
new_client.owner.outputs[0].clients = client.outputs[0].clients
r = local_fill_sink.transform(new_client.owner)
if not r:
continue
replacements.update(r)
return replacements
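# A never-called sketch (illustrative only) of the kind of graph the
# fill-sinking rewrite above rearranges.
def _example_local_fill_sink():
    import theano.tensor as tt

    a = tt.matrix("a")
    b = tt.scalar("b")
    c = tt.matrix("c")
    d = tt.scalar("d")
    e = tt.scalar("e")
    # During canonicalization the fills are expected to sink below the add,
    # yielding something like fill(c, fill(a, add(b, d, e))).
    out = tt.fill(a, b) + tt.fill(c, d) + e
    return out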
register_canonicalize(local_fill_sink)
@register_specialize
@register_stabilize
# @register_canonicalize # We make full pass after the canonizer phase.
@local_optimizer([fill])
def local_fill_to_alloc(node):
"""fill(s,v) -> alloc(v, shape(s))
This is an important optimization because with the shape_to_shape_i
optimization, the dependency on 's' is often removed.
"""
if node.op == fill:
r, v = node.inputs
if v.type == node.outputs[0].type:
# this is a useless fill, erase it.
rval = [v]
elif v.type.broadcastable == node.outputs[0].type.broadcastable:
# this is a cast
rval = [tt.cast(v, node.outputs[0].type.dtype)]
elif r.type.broadcastable == node.outputs[0].type.broadcastable:
# we are broadcasting v somehow, but not r
o = broadcast_like(v, r, node.fgraph, dtype=v.dtype)
copy_stack_trace(node.outputs[0], o)
rval = [o]
else:
# we are broadcasting both v and r,
# the output shape must be computed
#
# TODO: implement this case (including a test!)
#
# I think the strategy should be to extend the shorter
# shape vector with 1s (how?) and then take the
# elementwise max of the two. - how to flag an error of
# shape mismatch where broadcasting should be illegal?
return
        # TODO: cut out unnecessary dimshuffles of v
assert rval[0].type == node.outputs[0].type, (
"rval",
rval[0].type,
"orig",
node.outputs[0].type,
"node",
node,
) # theano.printing.debugprint(node.outputs[0], file='str'))
return rval
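# A never-called sketch (illustrative only): the rewrite above turns a fill
# whose model is only needed for its shape into an explicit alloc.
def _example_local_fill_to_alloc():
    import theano
    import theano.tensor as tt

    s = tt.matrix("s")
    v = tt.scalar("v")
    f = theano.function([s, v], tt.fill(s, v))
    # In the optimized graph the fill should appear as roughly
    # Alloc(v, s.shape[0], s.shape[1]), so the values of s are never read.
    theano.printing.debugprint(f)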
# Register this after stabilize at 1.5 to make sure stabilize doesn't
# get affected by a less canonicalized graph due to alloc.
compile.optdb.register(
"local_fill_to_alloc", in2out(local_fill_to_alloc), 1.51, "fast_run"
)
# Needed to clean some extra alloc added by local_fill_to_alloc
compile.optdb.register(
"local_elemwise_alloc", in2out(local_elemwise_alloc), 1.52, "fast_run"
)
@register_canonicalize("fast_compile")
@register_useless
@local_optimizer([fill])
def local_useless_fill(node):
"""fill(s,v) -> v
This optimization is only needed in FAST_COMPILE to make the code
more readable. Normally, it is done by the local_fill_to_alloc
opt.
"""
if node.op == fill:
r, v = node.inputs
if v.type == node.outputs[0].type:
# this is a useless fill, erase it.
# also, we don't need to copy over any stack traces here
return [v]
@register_specialize
@register_stabilize
@register_canonicalize
@register_useless
@local_optimizer([alloc])
def local_useless_alloc(node):
"""
If the input type is the same as the output type (dtype and broadcast)
there is no change in the shape of the input. So this is just a simple copy
of the input. This is not needed.
"""
op = node.op
if not isinstance(op, Alloc):
return False
input = node.inputs[0]
output = node.outputs[0]
# Check if dtype and broadcast remain the same.
if input.type == output.type:
# We don't need to copy over any stack traces here
return [input]
@register_specialize
@register_stabilize
@register_canonicalize
@local_optimizer([alloc])
def local_canonicalize_alloc(node):
"""If the input type is the same as the output type (dtype and broadcast)
there is no change in the shape of the input. So this is just a simple copy
of the input. This is not needed. (as local_useless_alloc)
    Also, it will canonicalize alloc by creating a Dimshuffle after the
alloc to introduce the dimensions of constant size 1.
See https://github.com/Theano/Theano/issues/4072 to know why this
is needed.
"""
op = node.op
if not isinstance(op, Alloc):
return False
input = node.inputs[0]
output = node.outputs[0]
# Check if dtype and broadcast remain the same.
if input.type == output.type:
# We don't need to copy over any stack traces here
return [input]
# Allow local_merge_alloc to do its work first
clients = getattr(output, "clients", [])
for client, i in clients:
if client != "output" and isinstance(client.op, Alloc):
return
# Check if alloc adds a broadcastable dimension with shape 1.
output_shape = node.inputs[1:]
num_dims_with_size_1_added_to_left = 0
for i in range(len(output_shape) - input.ndim):
if extract_constant(output_shape[i], only_process_constants=True) == 1:
num_dims_with_size_1_added_to_left += 1
else:
break
new_output_shape = output_shape[num_dims_with_size_1_added_to_left:]
if num_dims_with_size_1_added_to_left > 0 and len(new_output_shape) >= input.ndim:
if (
output.broadcastable[num_dims_with_size_1_added_to_left:]
== input.broadcastable
):
inner = input
else:
inner = op(*([input] + new_output_shape))
dimshuffle_new_order = ["x"] * num_dims_with_size_1_added_to_left + list(
range(len(new_output_shape))
)
return [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]
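# A never-called sketch (illustrative only) of the canonicalization above:
# leading size-1 dimensions added by an alloc are expressed with a
# DimShuffle instead.
def _example_local_canonicalize_alloc():
    import theano
    import theano.tensor as tt

    x = tt.vector("x")
    # alloc(x, 1, x.shape[0]) only prepends a broadcastable dimension ...
    f = theano.function([x], tt.alloc(x, 1, x.shape[0]))
    # ... so the optimized graph should use x.dimshuffle('x', 0) rather
    # than a real Alloc.
    theano.printing.debugprint(f)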
# Don't register by default.
@local_optimizer([AllocEmpty])
def local_alloc_empty_to_zeros(node):
"""This convert AllocEmpty to Alloc of 0.
This help investigate NaN with NanGuardMode. Not registered by
default. To activate it, use the Theano flag
optimizer_including=alloc_empty_to_zeros. This also enable
the GPU version of this optimizations.
"""
if isinstance(node.op, AllocEmpty):
return [tt.zeros(node.inputs, dtype=node.outputs[0].dtype)]
compile.optdb.register(
"local_alloc_empty_to_zeros",
in2out(local_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
"alloc_empty_to_zeros",
)
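# A never-called sketch (illustrative only) of how this optional rewrite is
# enabled: either through the Theano flag
#   THEANO_FLAGS="optimizer_including=alloc_empty_to_zeros"
# or programmatically by extending the compilation mode.
def _example_enable_alloc_empty_to_zeros():
    import theano

    mode = theano.compile.mode.get_default_mode().including(
        "alloc_empty_to_zeros"
    )
    # Pass mode=mode to theano.function to compile with the extra rewrite.
    return mode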
@register_specialize
@register_canonicalize
@local_optimizer([Shape])
def local_shape_to_shape_i(node):
if node.op == tt.shape:
# This optimization needs ShapeOpt and fgraph.shape_feature
if not hasattr(node.fgraph, "shape_feature"):
return
shape_feature = node.fgraph.shape_feature
ret = shape_feature.make_vector_shape(node.inputs[0])
# We need to copy over stack trace from input to output
copy_stack_trace(node.outputs[0], ret)
return [ret]
# TODO: Not sure what type of node we are expecting here
@register_specialize
@register_canonicalize
@local_optimizer(None)
def local_track_shape_i(node):
try:
shape_feature = node.fgraph.shape_feature
except AttributeError:
return
if node in shape_feature.scheduled:
# Don't unschedule node as it could be reinserted in the
# fgraph as we don't change it in the shapefeature internal
# structure.
assert isinstance(node.op, Shape_i)
replacement = shape_feature.scheduled[node]
return [shape_feature.shape_of[replacement][node.op.i]]
@register_specialize
@register_canonicalize
@local_optimizer([Subtensor])
def local_subtensor_inc_subtensor(node):
"""
Subtensor(SetSubtensor(x, y, idx), idx) -> y
"""
if isinstance(node.op, Subtensor):
x = node.inputs[0]
if not x.owner or not isinstance(x.owner.op, IncSubtensor):
return
if not x.owner.op.set_instead_of_inc:
return
if x.owner.inputs[2:] == node.inputs[1:] and tuple(
x.owner.op.idx_list
) == tuple(node.op.idx_list):
out = node.outputs[0]
y = x.owner.inputs[1]
# If the dtypes differ, cast y into x.dtype
if x.dtype != y.dtype:
y = y.astype(x.dtype)
if out.type == y.type:
# if x[idx] and y have the same type, directly return y
return [y]
else:
# The difference is related to broadcasting pattern
assert out.broadcastable != y.broadcastable
# We have to alloc y to the shape of x[idx]
x_subtensor = node.op(x.owner.inputs[0], *x.owner.inputs[2:])
return [alloc(y, *x_subtensor.shape)]
else:
return
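# A never-called sketch (illustrative only) of the pattern targeted above:
# reading back exactly the slice that a set_subtensor just wrote should
# reduce to the written value itself.
def _example_local_subtensor_inc_subtensor():
    import theano
    import theano.tensor as tt

    x = tt.matrix("x")
    y = tt.vector("y")
    out = tt.set_subtensor(x[0], y)[0]
    f = theano.function([x, y], out)
    # The optimized graph is expected to return y directly (possibly cast),
    # without materializing the intermediate SetSubtensor result.
    theano.printing.debugprint(f)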
@register_specialize
@register_canonicalize
@local_optimizer([Subtensor])
def local_subtensor_remove_broadcastable_index(node):
"""
Remove broadcastable dimension with index 0 or -1
a[:,:,:,0] -> a.dimshuffle(0,1,2), when
a.broadcastable = (False, False, False, True)
a[0,:,-1,:] -> a.dimshuffle(1,3), when
a.broadcastable = (True, False, True, False)
"""
if isinstance(node.op, Subtensor):
idx = node.op.idx_list
else:
return
remove_dim = []
node_inputs_idx = 1
for dim, elem in enumerate(idx):
if isinstance(elem, (ts.Scalar)):
# The idx is a Scalar, ie a Type. This means the actual index
# is contained in node.inputs[1]
dim_index = node.inputs[node_inputs_idx]
if type(dim_index) == ts.ScalarConstant:
dim_index = dim_index.value
if dim_index in [0, -1] and node.inputs[0].broadcastable[dim]:
remove_dim.append(dim)
node_inputs_idx += 1
else:
return
elif isinstance(elem, slice):
if elem != slice(None):
return
elif isinstance(elem, (int, np.integer)):
if elem in [0, -1] and node.inputs[0].broadcastable[dim]:
remove_dim.append(dim)
else:
raise TypeError("case not expected")
if len(remove_dim) == 0:
return
else:
all_dim = range(node.inputs[0].ndim)
remain_dim = [x for x in all_dim if x not in remove_dim]
return [node.inputs[0].dimshuffle(tuple(remain_dim))]
@register_specialize
@register_canonicalize("fast_compile_gpu")
@register_useless
@local_optimizer([Subtensor, AdvancedSubtensor1])
def local_subtensor_make_vector(node):
"""
Replace all subtensor(make_vector) like:
[a,b,c][0] -> a
[a,b,c][0:2] -> [a,b]
Replace all AdvancedSubtensor1(make_vector) like:
[a,b,c][[0,2]] -> [a,c]
We can do this for constant indexes.
"""
x = node.inputs[0]
if not x.owner or x.owner.op != make_vector:
return
if isinstance(node.op, Subtensor):
# This optimization needs ShapeOpt and fgraph.shape_feature
try:
(idx,) = node.op.idx_list
except Exception:
# 'how can you have multiple indexes into a shape?'
raise
if isinstance(idx, (ts.Scalar, tt.TensorType)):
# The idx is a Scalar, ie a Type. This means the actual index
# is contained in node.inputs[1]
old_idx, idx = idx, node.inputs[1]
assert idx.type == old_idx
elif isinstance(node.op, AdvancedSubtensor1):
idx = node.inputs[1]
else:
return
if isinstance(idx, (int, np.integer)):
# We don't need to copy over any stack traces here
return [x.owner.inputs[idx]]
elif isinstance(idx, Variable):
if idx.ndim == 0:
# if it is a constant we can do something with it
try:
v = get_scalar_constant_value(idx, only_process_constants=True)
if isinstance(v, np.integer):
# Python 2.4 wants to index only with Python integers
v = int(v)
# We don't need to copy over any stack traces here
try:
ret = [x.owner.inputs[v]]
except IndexError:
raise NotScalarConstantError("Bad user graph!")
return ret
except NotScalarConstantError:
pass
elif idx.ndim == 1 and isinstance(idx, tt.Constant):
values = list(map(int, list(idx.value)))
ret = make_vector(*[x.owner.inputs[v] for v in values])
# Copy over stack trace from previous output to new output
copy_stack_trace(node.outputs[0], ret)
ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
else:
raise TypeError("case not expected")
elif isinstance(idx, slice):
# it is a slice of ints and/or Variables
# check subtensor to see if it can contain constant variables, and if
# it can, then try to unpack them.
try:
const_slice = node.op.get_constant_idx(node.inputs, allow_partial=False)[0]
ret = make_vector(*x.owner.inputs[const_slice])
# Copy over stack trace from previous outputs to new output
copy_stack_trace(node.outputs, ret)
ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
except NotScalarConstantError:
pass
else:
raise TypeError("case not expected")
# TODO: the other optimization for and, or, xor, le and ge see ticket #496.
@register_useless
@register_canonicalize("fast_compile")
@register_specialize
@local_optimizer([Elemwise])
def local_useless_elemwise(node):
"""
eq(x, x) -> 1
neq(x, x) -> 0
mul(x) -> x
add(x) -> x
identity(x) -> x
and(x, 1) -> x (if x.dtype == 'bool')
and(x, 0) -> zeros_like(x)
or(x, 0) -> x
or(x, 1) -> ones_like(x) (if x.dtype == 'bool')
xor(x, x) -> zeros_like(x)
"""
if isinstance(node.op, Elemwise):
        # We call zeros_like and ones_like with opt=True to generate a
        # cleaner graph.
dtype = node.outputs[0].dtype
if node.op.scalar_op == ts.eq and len(node.inputs) == 2:
if node.inputs[0] == node.inputs[1]:
# it is the same var in the graph. That will always be true
ret = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)
# Copy stack trace from input to constant output
copy_stack_trace(node.outputs[0], ret)
return [ret]
elif node.op.scalar_op == ts.neq and len(node.inputs) == 2:
if node.inputs[0] == node.inputs[1]:
# it is the same var in the graph. That will always be false
ret = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy stack trace from input to constant output
copy_stack_trace(node.outputs[0], ret)
return [ret]
elif node.op.scalar_op == ts.mul and len(node.inputs) == 1:
# No need to copy over any stack trace
return [node.inputs[0]]
elif node.op.scalar_op == ts.add and len(node.inputs) == 1:
# No need to copy over any stack trace
return [node.inputs[0]]
elif node.op.scalar_op == ts.identity and len(node.inputs) == 1:
return [node.inputs[0]]
elif isinstance(node.op.scalar_op, ts.AND) and len(node.inputs) == 2:
if isinstance(node.inputs[0], tt.TensorConstant):
const_val = tt.extract_constant(
node.inputs[0], only_process_constants=True
)
if not isinstance(const_val, Variable):
if const_val == 0:
return [tt.zeros_like(node.inputs[1], dtype=dtype, opt=True)]
elif node.outputs[0].dtype == "bool":
# If the output is not Boolean, it is the bitwise AND,
# and this optimization would be wrong
return [node.inputs[1].astype(node.outputs[0].dtype)]
if isinstance(node.inputs[1], tt.TensorConstant):
const_val = tt.extract_constant(
node.inputs[1], only_process_constants=True
)
if not isinstance(const_val, Variable):
if const_val == 0:
return [tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)]
elif node.outputs[0].dtype == "bool":
# If the output is not Boolean, it is the bitwise AND,
# and this optimization would be wrong
return [node.inputs[0].astype(node.outputs[0].dtype)]
elif isinstance(node.op.scalar_op, ts.OR) and len(node.inputs) == 2:
if isinstance(node.inputs[0], tt.TensorConstant):
const_val = tt.extract_constant(
node.inputs[0], only_process_constants=True
)
if not isinstance(const_val, Variable):
if const_val == 0:
return [node.inputs[1].astype(node.outputs[0].dtype)]
elif node.outputs[0].dtype == "bool":
# If the output is not Boolean, it is the bitwise OR,
# and this optimization would be wrong
return [tt.ones_like(node.inputs[1], dtype=dtype, opt=True)]
if isinstance(node.inputs[1], tt.TensorConstant):
const_val = tt.extract_constant(
node.inputs[1], only_process_constants=True
)
if not isinstance(const_val, Variable):
if const_val == 0:
return [node.inputs[0].astype(node.outputs[0].dtype)]
elif node.outputs[0].dtype == "bool":
# If the output is not Boolean, it is the bitwise OR,
# and this optimization would be wrong
return [tt.ones_like(node.inputs[0], dtype=dtype, opt=True)]
elif isinstance(node.op.scalar_op, ts.XOR) and len(node.inputs) == 2:
if node.inputs[0] is node.inputs[1]:
return [tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)]
@register_specialize
@local_optimizer([Elemwise])
def local_alloc_unary(node):
"""unary(alloc(x, shp)) -> alloc(unary(x), shp)"""
if isinstance(node.op, Elemwise) and len(node.inputs) == 1:
a = node.inputs[0]
if a.owner and isinstance(a.owner.op, Alloc):
x = a.owner.inputs[0]
shp = a.owner.inputs[1:]
v = node.op(x)
# T.alloc does not preserve the stacktrace of v,
# so we need to copy it over from x.
copy_stack_trace(node.outputs[0], v)
ret = alloc(tt.cast(v, node.outputs[0].dtype), *shp)
# T.cast does not preserve the stacktrace of x,
# so we need to copy it over to the output.
copy_stack_trace([node.outputs[0], a], ret)
return [ret]
@register_canonicalize
@register_specialize
@local_optimizer([Elemwise])
def local_cast_cast(node):
"""cast(cast(x, dtype1), dtype2)
    when either of these constraints holds:
    dtype1 == dtype2
    OR the base dtype is the same (int, uint, float, complex)
    and the first cast causes an upcast.
"""
if not isinstance(node.op, Elemwise) or not isinstance(node.op.scalar_op, ts.Cast):
return
x = node.inputs[0]
if (
not x.owner
or not isinstance(x.owner.op, Elemwise)
or not isinstance(x.owner.op.scalar_op, ts.Cast)
):
return
type1 = x.owner.op.scalar_op.o_type
type2 = node.op.scalar_op.o_type
base = x.owner.inputs[0]
if type1 == type2:
# We don't need to copy over any stack traces here
return [x]
if is_an_upcast(base.dtype, type1.dtype):
# Checking for further redundancy. Eg: int8 -> int32 -> int8
if type2.dtype == base.dtype:
return x.owner.inputs
else:
# Apply the second cast only
v = node.op(base)
# Copy stack trace from the output of the original cast
copy_stack_trace(node.outputs[0], v)
return [v]
def is_an_upcast(type1, type2):
"""Given two data types (as strings), check if converting to
type2 from type1 constitutes an upcast.
Differs from theano.scalar.upcast
"""
category = {
# The first number in the pair is the dtype (bool, uint, int, float,
# complex). Conversion from higher to lower is never an upcast.
# The second number roughly indicates the precision. Again, conversion
# from higher to lower is never an upcast.
"bool": (0, 0),
"uint8": (1, 1),
"uint16": (1, 2),
"uint32": (1, 3),
"uint64": (1, 4),
"int8": (2, 1),
"int16": (2, 2),
"int32": (2, 3),
"int64": (2, 4),
"float16": (3, 1.5),
"float32": (3, 2.5),
"float64": (3, 3.5),
"complex64": (4, 3),
"complex128": (4, 4),
}
cat1 = category[type1]
cat2 = category[type2]
if cat2[0] >= cat1[0] and cat2[1] > cat1[1]:
return True
else:
return False
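# A few concrete values of is_an_upcast, for illustration only:
#   is_an_upcast("int8", "int32")      -> True   (same base dtype, more precision)
#   is_an_upcast("bool", "uint8")      -> True
#   is_an_upcast("uint8", "int8")      -> False  (precision does not increase)
#   is_an_upcast("float64", "float32") -> False  (precision decreases)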
@register_canonicalize
@register_specialize
@local_optimizer([Elemwise])
def local_func_inv(node):
"""
Check for two consecutive operations that are functional inverses
and remove them from the function graph.
"""
inv_pairs = (
(ts.Deg2Rad, ts.Rad2Deg),
(ts.Cosh, ts.ArcCosh),
(ts.Tanh, ts.ArcTanh),
(ts.Sinh, ts.ArcSinh),
(ts.Conj, ts.Conj),
(ts.Neg, ts.Neg),
(ts.Inv, ts.Inv),
)
x = node.inputs[0]
if not isinstance(node.op, Elemwise):
return
if not x.owner or not isinstance(x.owner.op, Elemwise):
return
prev_op = x.owner.op.scalar_op
node_op = node.op.scalar_op
for inv_pair in inv_pairs:
if is_inverse_pair(node_op, prev_op, inv_pair):
# We don't need to copy stack trace, because the optimization
# is trivial and maintains the earlier stack trace
return x.owner.inputs
return
def is_inverse_pair(node_op, prev_op, inv_pair):
"""
Given two consecutive operations, check if they are the
provided pair of inverse functions.
"""
node_is_op0 = isinstance(node_op, inv_pair[0])
node_is_op1 = isinstance(node_op, inv_pair[1])
prev_is_op0 = isinstance(prev_op, inv_pair[0])
prev_is_op1 = isinstance(prev_op, inv_pair[1])
return (node_is_op0 and prev_is_op1) or (node_is_op1 and prev_is_op0)
class Assert(Op):
"""
Implements assertion in a computational graph.
Returns the first parameter if the condition is true, otherwise, triggers
AssertionError.
Notes
-----
    This Op is a debugging feature. It can be removed from the graph
    by optimizations, and it can hide some possible optimizations from
    the optimizer. Specifically, it is removed if it can be determined
    that the condition will always be true. Also, the output of the Op must be
    used in the function computing the graph, but it doesn't have to be
    returned.
Examples
--------
>>> import theano
>>> T = theano.tensor
>>> x = T.vector('x')
>>> assert_op = T.opt.Assert()
>>> func = theano.function([x], assert_op(x, x.size<2))
"""
_f16_ok = True
__props__ = ("msg",)
view_map = {0: [0]}
check_input = False
def __init__(self, msg="Theano Assert failed!"):
self.msg = msg
def __setstate__(self, attrs):
self.__dict__.update(attrs)
if not hasattr(self, "msg"):
self.msg = "Theano Assert failed!"
def make_node(self, value, *conds):
if not isinstance(value, Variable):
value = tt.as_tensor_variable(value)
cond = [tt.as_tensor_variable(c) for c in conds]
assert np.all([c.type.ndim == 0 for c in cond])
return gof.Apply(self, [value] + cond, [value.type()])
def perform(self, node, inputs, out_):
(out,) = out_
v = inputs[0]
out[0] = v
assert np.all(inputs[1:]), self.msg
def grad(self, input, output_gradients):
return output_gradients + [DisconnectedType()()] * (len(input) - 1)
def connection_pattern(self, node):
return [[1]] + [[0]] * (len(node.inputs) - 1)
def c_code(self, node, name, inames, onames, props):
value = inames[0]
out = onames[0]
check = []
fail = props["fail"]
msg = self.msg.replace('"', '\\"').replace("\n", "\\n")
for idx in range(len(inames) - 1):
i = inames[idx + 1]
dtype = node.inputs[idx + 1].dtype
check.append(
"if(!((npy_%(dtype)s*)PyArray_DATA(%(i)s))[0])"
'{PyErr_SetString(PyExc_AssertionError,"%(msg)s");'
"%(fail)s}" % locals()
)
check = "\n".join(check)
return (
"""
%(check)s
Py_XDECREF(%(out)s);
%(out)s = %(value)s;
Py_INCREF(%(value)s);
"""
% locals()
)
def c_code_cache_version(self):
return (3, 0)
def infer_shape(self, node, input_shapes):
return [input_shapes[0]]
assert_ = Assert()
# Unittest.assert_ is a deprecated name for assertTrue.
# 2to3 converts tt.opt.assert_ to tt.opt.assertTrue,
# so we define a new name as a workaround.
assert_op = assert_
@register_specialize
@local_optimizer([Assert])
def local_remove_useless_assert(node):
if isinstance(node.op, Assert):
cond = []
for c in node.inputs[1:]:
try:
const = get_scalar_constant_value(c)
if 0 != const.ndim or const == 0:
                    # Should we raise an error here? How can we be sure it
                    # is not caught?
cond.append(c)
except NotScalarConstantError:
cond.append(c)
if len(cond) == 0:
# We don't need to copy over any stack traces here
return [node.inputs[0]]
if len(cond) != len(node.inputs) - 1:
ret = assert_(node.inputs[0], *cond)
# We copy over stack trace from the output of the original assert
copy_stack_trace(node.outputs[0], ret)
return [ret]
@local_optimizer([Assert])
def local_remove_all_assert(node):
"""An optimization disabled by default that removes all asserts from
the graph.
Notes
-----
See the :ref:`unsafe` section to know how to enable it.
"""
if not isinstance(node.op, Assert):
return
# We don't need to copy over any stack traces here
return [node.inputs[0]]
# Disabled by default
compile.optdb["canonicalize"].register(
"local_remove_all_assert",
local_remove_all_assert,
"unsafe",
use_db_name_as_tag=False,
)
compile.optdb["stabilize"].register(
"local_remove_all_assert",
local_remove_all_assert,
"unsafe",
use_db_name_as_tag=False,
)
compile.optdb["specialize"].register(
"local_remove_all_assert",
local_remove_all_assert,
"unsafe",
use_db_name_as_tag=False,
)
compile.optdb["useless"].register(
"local_remove_all_assert",
local_remove_all_assert,
"unsafe",
use_db_name_as_tag=False,
)
#######################
# Constant Canonicalization
############################
@register_canonicalize
@local_optimizer([Elemwise])
def local_upcast_elemwise_constant_inputs(node):
"""This explicitly upcasts constant inputs to elemwise Ops, when
those Ops do implicit upcasting anyway.
Rationale: it helps merge things like (1-x) and (1.0 - x).
"""
if len(node.outputs) > 1:
return
try:
shape_i = node.fgraph.shape_feature.shape_i
except AttributeError:
shape_i = None
if isinstance(node.op, Elemwise):
scalar_op = node.op.scalar_op
# print "aa", scalar_op.output_types_preference
if getattr(scalar_op, "output_types_preference", None) in (
ts.upgrade_to_float,
ts.upcast_out,
):
            # This is the kind of op whose input dtypes we can safely
            # change by upcasting explicitly.
output_dtype = node.outputs[0].type.dtype
new_inputs = []
for i in node.inputs:
if i.type.dtype == output_dtype:
new_inputs.append(i)
else:
try:
# works only for scalars
cval_i = get_scalar_constant_value(
i, only_process_constants=True
)
if all(i.broadcastable):
new_inputs.append(
tt.shape_padleft(tt.cast(cval_i, output_dtype), i.ndim)
)
else:
if shape_i is None:
return
new_inputs.append(
alloc(
tt.cast(cval_i, output_dtype),
*[shape_i(d)(i) for d in range(i.ndim)],
)
)
# print >> sys.stderr, "AAA",
# *[Shape_i(d)(i) for d in range(i.ndim)]
except NotScalarConstantError:
# for the case of a non-scalar
if isinstance(i, tt.TensorConstant):
new_inputs.append(tt.cast(i, output_dtype))
else:
new_inputs.append(i)
if new_inputs != node.inputs:
rval = [node.op(*new_inputs)]
if rval[0].type != node.outputs[0].type:
                    # This can happen for example when floatX=float32
                    # and we do the true division between an int64
                    # and a constant that will get typed as int8.
                    # As this is just to allow merging more cases, if
                    # the upcast doesn't work, we can just skip it.
return
# Copy over output stacktrace from before upcasting
copy_stack_trace(node.outputs[0], rval)
return rval
##################
# Subtensor opts #
##################
@register_useless
@register_canonicalize
@register_specialize
@local_optimizer([IncSubtensor])
def local_useless_inc_subtensor(node):
"""
Remove IncSubtensor, when we overwrite the full inputs with the
new value.
"""
if not isinstance(node.op, IncSubtensor):
return
if node.op.set_instead_of_inc is False:
# This is an IncSubtensor, so the init value must be zeros
try:
c = get_scalar_constant_value(node.inputs[0], only_process_constants=True)
if c != 0:
return
except NotScalarConstantError:
return
if (
node.inputs[0].ndim != node.inputs[1].ndim
or node.inputs[0].broadcastable != node.inputs[1].broadcastable
):
        # FB: I didn't check if this case can happen, but this opt
        # doesn't support it.
return
    # We have a SetSubtensor or an IncSubtensor on zeros.
    # Is this IncSubtensor useful?
    # Check that we keep all the original data.
# Put the constant inputs in the slice.
idx_cst = get_idx_list(node.inputs[1:], node.op.idx_list)
if all(
isinstance(e, slice)
and e.start is None
and e.stop is None
and (
e.step is None
or tt.extract_constant(e.step, only_process_constants=True) == -1
)
for e in idx_cst
):
# IncSubtensor broadcast node.inputs[1] on node.inputs[0]
# based on run time shapes, so we must check they are the same.
if not hasattr(node.fgraph, "shape_feature"):
return
if not node.fgraph.shape_feature.same_shape(node.inputs[0], node.inputs[1]):
return
# There is no reverse, so we don't need a replacement.
if all(e.step is None for e in node.op.idx_list):
            # They are the same shape, so we can remove this IncSubtensor
return [node.inputs[1]]
ret = Subtensor(node.op.idx_list)(*node.inputs[1:])
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, ret)
return [ret]
@register_canonicalize
@local_optimizer([AdvancedIncSubtensor1])
def local_set_to_inc_subtensor(node):
"""
AdvancedIncSubtensor1(x, x[ilist]+other, ilist, set_instead_of_inc=True) ->
AdvancedIncSubtensor1(x, other, ilist, set_instead_of_inc=False)
"""
if (
isinstance(node.op, AdvancedIncSubtensor1)
and node.op.set_instead_of_inc
and node.inputs[1].owner
and isinstance(node.inputs[1].owner.op, Elemwise)
and isinstance(node.inputs[1].owner.op.scalar_op, ts.Add)
):
addn = node.inputs[1].owner
subn = None
other = None
if addn.inputs[0].owner and isinstance(
addn.inputs[0].owner.op, AdvancedSubtensor1
):
subn = addn.inputs[0].owner
other = addn.inputs[1]
elif addn.inputs[1].owner and isinstance(
addn.inputs[1].owner.op, AdvancedSubtensor1
):
subn = addn.inputs[1].owner
other = addn.inputs[0]
else:
return
if subn.inputs[1] != node.inputs[2] or subn.inputs[0] != node.inputs[0]:
return
ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])
# Copy over previous output stacktrace
# Julian: I'm not sure about this at all...
copy_stack_trace(node.outputs, ret)
return [ret]
@register_useless
@register_canonicalize
@register_specialize
@local_optimizer([Subtensor])
def local_useless_slice(node):
"""
Remove Subtensor of the form X[0, :] -> X[0]
"""
if isinstance(node.op, Subtensor):
slices = get_idx_list(node.inputs, node.op.idx_list)
last_slice = len(slices)
for s in slices[::-1]:
# check if slice and then check slice indices
if (
isinstance(s, slice)
and s.start is None
and s.stop is None
and (
s.step is None
or tt.extract_constant(s.step, only_process_constants=True) == 1
)
):
last_slice -= 1
else:
break
# check if we removed something
if last_slice < len(slices):
subtens = Subtensor(slices[:last_slice])
sl_ins = Subtensor.collapse(
slices[:last_slice], lambda x: isinstance(x, Variable)
)
out = subtens(node.inputs[0], *sl_ins)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, out)
return [out]
@register_canonicalize
@register_specialize
@local_optimizer([Subtensor, AdvancedSubtensor1])
def local_useless_subtensor(node):
"""
Remove Subtensor/AdvancedSubtensor1 if it takes the full input. In the
AdvancedSubtensor1 case, the full input is taken when the indices are
equivalent to `arange(0, input.shape[0], 1)` using either an explicit
list/vector or the ARange op.
"""
    # If the optimization is tried on a node that is not yet part of a graph
if not hasattr(node, "fgraph"):
return
# This optimization needs ShapeOpt and fgraph.shape_feature
if not hasattr(node.fgraph, "shape_feature"):
return
shape_of = node.fgraph.shape_feature.shape_of
if isinstance(node.op, Subtensor):
cdata = node.op.get_constant_idx(
node.inputs, allow_partial=True, only_process_constants=True
)
for pos, idx in enumerate(cdata):
if not isinstance(idx, slice):
# If idx is not a slice, this means we remove this dimension
# from the output, so the subtensor is not useless
return False
if idx.start is not None and idx.start != 0:
# If the start of the slice is different from 0, or is a
# variable, then we assume the subtensor is not useless
return False
if idx.step is not None and idx.step != 1:
# If we are going backwards, or skipping elements, then this
# is not a useless subtensor
return False
for pos, idx in enumerate(cdata):
length_pos = shape_of[node.inputs[0]][pos]
if isinstance(idx.stop, (int, np.integer)):
length_pos_data = sys.maxsize
try:
length_pos_data = get_scalar_constant_value(
length_pos, only_process_constants=True
)
except NotScalarConstantError:
pass
if idx.stop < length_pos_data:
return False
elif isinstance(idx.stop, gof.Variable):
length_pos_shape_i = idx.stop
# length_pos is a tensor variable, but length_pos_shape_i
# is a scalar variable. We try to see if they represent
# the same underlying variable.
if length_pos_shape_i.owner and isinstance(
length_pos_shape_i.owner.op, ScalarFromTensor
):
length_pos_shape_i = length_pos_shape_i.owner.inputs[0]
elif length_pos.owner and isinstance(
length_pos.owner.op, TensorFromScalar
):
length_pos = length_pos.owner.inputs[0]
else:
# We did not find underlying variables of the same type
return False
                # The type can be different: int32 vs int64. length_pos
                # should always be int64 as that is what the shape
                # tracker keeps. Subtensor accepts any scalar int{8,16,32,64}
                # as index type.
assert str(length_pos.type.dtype) == "int64"
assert str(length_pos_shape_i.type.dtype) in [
"int8",
"int16",
"int32",
"int64",
]
# length_pos_shape_i cannot be None
if length_pos_shape_i != length_pos:
return False
elif idx.stop is None:
pass
else:
return False
elif isinstance(node.op, AdvancedSubtensor1):
# get length of the indexed tensor along the first axis
try:
length = get_scalar_constant_value(
shape_of[node.inputs[0]][0], only_process_constants=True
)
except NotScalarConstantError:
return False
# get index (which must be a vector by definition)
idx = node.inputs[1]
# `idx` must be equivalent to [0,1,...,shape[0] - 1] to qualify for
# this optimization
if isinstance(idx, tt.Constant):
idx = idx.value
if len(idx) != length:
return False
if np.any(idx != np.arange(length)):
return False
elif idx.owner is not None and isinstance(idx.owner.op, tt.ARange):
try:
start, stop, step = map(
lambda x: get_scalar_constant_value(x, only_process_constants=True),
idx.owner.inputs,
)
except NotScalarConstantError:
return False
if start != 0:
return False
if stop != length:
return False
if step != 1:
return False
else:
return False
else:
return False
# We don't need to copy over any stacktrace here,
# because previous stacktrace should suffice.
return [node.inputs[0]]
# fast_compile to allow opt subtensor(cast{float32}(make_vector))
@register_canonicalize("fast_compile")
@local_optimizer([Subtensor])
def local_subtensor_lift(node):
"""
    unary(x)[idx] -> unary(x[idx])  # for any broadcast pattern.
    Handles the following unary ops:
    elemwise(x,...)[idx] -> elemwise(x[idx],...)
    when x,... are broadcasted scalars or not broadcasted at all
    rebroadcast(x)[idx] => rebroadcast(x[idx])
"""
if isinstance(node.op, Subtensor):
u = node.inputs[0]
if not u.owner or len(u.clients) > 1:
return False
if isinstance(u.owner.op, Elemwise) and len(u.owner.inputs) == 1:
idx = node.inputs[1:]
x_idx = node.op(u.owner.inputs[0], *idx)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, x_idx)
ret = u.owner.op(x_idx)
# Copy over previous output stacktrace
# and stacktrace from previous unary operation
copy_stack_trace([node.outputs[0], node.inputs[0]], ret)
return [ret]
if isinstance(u.owner.op, Elemwise):
new_inputs = []
if all([sum(i.type.broadcastable) == 0 for i in u.owner.inputs]):
# There is no broadcastable in the inputs
idx = node.inputs[1:]
new_inputs = [node.op(i, *idx) for i in u.owner.inputs]
# Copy over previous output stacktrace
copy_stack_trace(node.outputs[0], new_inputs)
ret = u.owner.op(*new_inputs)
# Copy over previous output stacktrace
# and stacktrace from previous unary operation
copy_stack_trace([node.outputs[0], node.inputs[0]], ret)
return [ret]
elif all(
[sum(i.type.broadcastable) in [i.ndim, 0] for i in u.owner.inputs]
):
# There is no broadcastable in the inputs or it is scalar
idx = node.inputs[1:]
new_inputs = []
for i in u.owner.inputs:
if sum(i.type.broadcastable) == 0:
new_inputs.append(node.op(i, *idx))
else:
                        # If the subtensor removes some dims, we must
                        # lower the number of dimensions of this scalar.
if node.outputs[0].ndim == i.ndim:
new_inputs.append(i)
else:
new_inputs.append(
i.dimshuffle(["x"] * node.outputs[0].ndim)
)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs[0], new_inputs)
ret = u.owner.op(*new_inputs)
# Copy over previous output stacktrace
# and stacktrace from previous unary operation
copy_stack_trace([node.outputs[0], node.inputs[0]], ret)
return [ret]
if isinstance(u.owner.op, Rebroadcast):
# make sure that Rebroadcast has only 1 input
assert len(u.owner.inputs) == 1
# Subtensor might reduce dim., adapt broadcast pattern accordingly
new_axis = []
# loop through indices being subtensor-ed
# i indexes broadcastable pattern before subtensor
# j indexes broadcastable pattern after subtensor
j = 0
for (i, x) in enumerate(node.op.idx_list):
                # if it's not a slice, it will reduce the dimension and should
                # not appear in the broadcastable dimensions
if isinstance(x, slice):
new_axis += [(j, u.broadcastable[i])]
j += 1
# now keep the broadcastable pattern of all
# items not appearing in subtensor list
for i in range(len(node.op.idx_list), len(u.broadcastable)):
new_axis += [(j, u.broadcastable[i])]
j += 1
subt_x = node.op(u.owner.inputs[0], *node.inputs[1:])
# Copy over previous output stacktrace
copy_stack_trace(node.outputs[0], subt_x)
rbcast_subt_x = Rebroadcast(*new_axis)(subt_x)
# Copy over previous output stacktrace
# and stacktrace from previous unary operation
copy_stack_trace([node.outputs[0], node.inputs[0]], rbcast_subt_x)
return [rbcast_subt_x]
def merge_two_slices(slice1, len1, slice2, len2):
"""
This function merges two slices into a single slice. The code works on
the assumption that:
a) slice1 is actually a slice and not an index, while slice2
can be just an index.
b) the two slices **have been applied consecutively** on the same
tensor
The output slice is **not** in canonical form, but actually just a slice
that can be applied to a tensor to produce the same output as applying
the two consecutive slices.
``len1`` is the length of the tensor **before** applying the first slice,
while ``len2`` is the length **after** applying the first slice.
"""
list_opt = [
local_abs_merge,
local_mul_switch_sink,
local_upcast_elemwise_constant_inputs,
local_useless_switch,
constant_folding,
]
if type(slice1) is not slice:
raise ValueError(
(
"First provided slice should actually be of type"
"slice and not an index !"
),
slice1,
)
sl1, reverse1 = get_canonical_form_slice(slice1, len1)
sl2, reverse2 = get_canonical_form_slice(slice2, len2)
if type(sl2) is not slice:
if reverse1 is None:
            # The first slice is not in reverse, which makes things a lot
            # clearer.
            # In this case we need to take care only of the special cases:
            # len2 <= 0  -> throw index error regardless of sl2
            # sl2 > len2 -> throw index error
            # sl2 < -len2 -> throw index error
            # To get an index error we simply use len1+1 to indicate we are
            # out of bounds, because passing this index through the formula
            # for the merged slice is not guaranteed to result in an
            # index error. The issue, though, is that the error will
            # complain about accessing element len1+1, which is probably not
            # too intuitive for the user.
val = sl1.start + sl2 * sl1.step
val = tt.switch(tt.le(len2, 0), len1 + 1, val)
val = tt.switch(tt.ge(sl2, len2), len1 + 1, val)
val = tt.switch(tt.lt(sl2, 0), -len1 - 1, val)
if sl1.step:
val = tt.switch(tt.eq(sl1.step, 0), len1 + 1, val)
val = pre_greedy_local_optimizer(list_opt, val)
return val
else:
# We are in the more complex case when we do not actually know
# if the first slice was in reverse or not.
            # In case it was not in reverse:
p_val = sl1.start + sl2 * sl1.step
            # In case it was in reverse, we need to realize that we do not want
            # the k-th element from sl.start but the k-th element from
            # sl.stop, counting backwards.
n_val = sl1.stop - 1 - sl2 * sl1.step
if config.warn.subtensor_merge_bug:
                warnings.warn(
"Your current code is fine, but Theano versions "
"prior to 0.5rc2 might have given an incorrect result. "
"To disable this warning, set the Theano flag "
"warn.subtensor_merge_bug to False."
)
# we need to pick either n_val or p_val and then follow same
# steps as above for covering the index error cases
val = tt.switch(tt.lt(reverse1, 0), n_val, p_val)
val = tt.switch(tt.le(len2, 0), len1 + 1, val)
val = tt.switch(tt.ge(sl2, len2), len1 + 1, val)
val = tt.switch(tt.lt(sl2, 0), -len1 - 1, val)
if sl1.step:
val = tt.switch(tt.eq(sl1.step, 0), len1 + 1, val)
val = pre_greedy_local_optimizer(list_opt, val)
return val
else:
        # We are dealing with two slices that need to be put together.
        # According to the two steps, we have 4 different combinations of
        # positive/negative. I will denote the case I'm looking at by
        # suffixes to the variables (nn, np, pn, pp):
flen = sl2.stop - sl2.start
p_step = sl1.step * sl2.step
n_step = sl1.step * sl2.step * -1
pp_start = tt.minimum(sl1.start + sl2.start * sl1.step, sl1.stop)
pp_stop = tt.minimum(sl1.start + sl2.stop * sl1.step, sl1.stop)
pn_stop = sl1.start + (sl2.start - 1) * sl1.step
pn_stop = tt.switch(
tt.and_(tt.lt(pn_stop, 0), tt.gt(flen, 0)),
-len1 - 1,
tt.minimum(pn_stop, sl1.stop),
)
pn_start = sl1.start + (sl2.stop - 1) * sl1.step
pn_start = tt.minimum(pn_start, sl1.stop)
pn_start = tt.maximum(pn_start, 0)
np_stop = sl1.stop - sl2.stop * sl1.step - 1
np_stop = tt.switch(
tt.and_(tt.lt(np_stop, 0), tt.gt(flen, 0)),
-len1 - 1,
tt.maximum(sl1.start - 1, np_stop),
)
np_start = tt.maximum(sl1.start, sl1.stop - sl2.start * sl1.step - 1)
nn_start = tt.maximum(sl1.start, (sl1.stop - 1) - (sl2.stop - 1) * sl1.step)
nn_stop = tt.maximum(sl1.start, sl1.stop - sl2.start * sl1.step)
start = tt.switch(
tt.lt(reverse2 * reverse1, 0),
tt.switch(tt.lt(reverse1, 0), np_start, pn_start),
tt.switch(tt.lt(reverse1, 0), nn_start, pp_start),
)
stop = tt.switch(
tt.lt(reverse2 * reverse1, 0),
tt.switch(tt.lt(reverse1, 0), np_stop, pn_stop),
tt.switch(tt.lt(reverse1, 0), nn_stop, pp_stop),
)
step = tt.switch(tt.lt(reverse2 * reverse1, 0), n_step, p_step)
start = tt.switch(tt.le(flen, 0), 0, start)
stop = tt.switch(tt.le(flen, 0), 0, stop)
        # The canonical form of the slice is pretty complicated
        # and is not simplified. We simplify it in advance here
        # as otherwise this creates too many useless optimizations that
        # DebugMode must check.
start = pre_greedy_local_optimizer(list_opt, start)
stop = pre_greedy_local_optimizer(list_opt, stop)
step = pre_greedy_local_optimizer(list_opt, step)
start = pre_greedy_local_optimizer(list_opt, start)
stop = pre_greedy_local_optimizer(list_opt, stop)
step = pre_greedy_local_optimizer(list_opt, step)
# Pre merge constant for the same reason.
start, stop, step = pre_constant_merge([start, stop, step])
return slice(start, stop, step)
@register_canonicalize
@register_specialize
@local_optimizer([Subtensor])
def local_subtensor_merge(node):
"""
Refactored optimization to deal with all cases of tensor merging.
Given a subgraph of the form Subtensor(Subtensor(u)), the optimization
expresses all slices in a canonical form, and then merges them together.
"""
if isinstance(node.op, Subtensor):
u = node.inputs[0]
if u.owner and isinstance(u.owner.op, Subtensor):
# We can merge :)
# x actual tensor on which we are picking slices
x = u.owner.inputs[0]
# slices of the first applied subtensor
slices1 = get_idx_list(u.owner.inputs, u.owner.op.idx_list)
slices2 = get_idx_list(node.inputs, node.op.idx_list)
# Get the shapes of the vectors !
try:
# try not to introduce new shape into the graph
xshape = node.fgraph.shape_feature.shape_of[x]
ushape = node.fgraph.shape_feature.shape_of[u]
except AttributeError:
# Following the suggested use of shape_feature which should
# consider the case when the compilation mode doesn't
# include the ShapeFeature
xshape = x.shape
ushape = u.shape
merged_slices = []
pos_2 = 0
pos_1 = 0
while (pos_1 < len(slices1)) and (pos_2 < len(slices2)):
slice1 = slices1[pos_1]
if type(slice1) is slice:
merged_slices.append(
merge_two_slices(
slice1, xshape[pos_1], slices2[pos_2], ushape[pos_2]
)
)
pos_2 += 1
else:
merged_slices.append(slice1)
pos_1 += 1
if pos_2 < len(slices2):
merged_slices += slices2[pos_2:]
else:
merged_slices += slices1[pos_1:]
merged_slices = tuple(as_index_constant(s) for s in merged_slices)
subtens = Subtensor(merged_slices)
sl_ins = Subtensor.collapse(
merged_slices, lambda x: isinstance(x, Variable)
)
# Do not call make_node for test_value
out = subtens(x, *sl_ins)
# Copy over previous output stacktrace
# and stacktrace from previous slicing operation.
# Why? Because, the merged slicing operation could have failed
# because of either of the two original slicing operations
orig_out = node.outputs[0]
copy_stack_trace([orig_out, node.inputs[0]], out)
# Restore original broadcastable dimensions that `subtens()` may
# have been unable to infer again
if out.type != orig_out.type:
assert out.dtype == orig_out.dtype
assert out.ndim == orig_out.ndim
out = tt.patternbroadcast(out, orig_out.broadcastable)
copy_stack_trace([orig_out, node.inputs[0]], out)
return [out]
@register_useless
@register_canonicalize
@register_specialize
@local_optimizer([Subtensor])
def local_subtensor_of_alloc(node):
"""
alloc(val)[x:y] -> alloc(val[...])
alloc(val)[x:y] -> alloc(val)
    This can be seen as a lift, but it also reduces the amount of computation and memory used.
"""
if not isinstance(node.op, Subtensor):
return False
u = node.inputs[0]
if u.owner is None:
return False
if not isinstance(u.owner.op, Alloc):
return False
slices = get_idx_list(node.inputs, node.op.idx_list)
val = u.owner.inputs[0]
dims = u.owner.inputs[1:]
assert len(slices) <= len(dims)
# Number of dimensions added to val
n_added_dims = u.ndim - val.ndim
# Dimensions of the returned alloc
nw_dims = []
# Slices to take from val
val_slices = []
for i, (sl, dim) in enumerate(zip(slices, dims)):
# If val was not copied over that dim,
# we need to take the appropriate subtensor on it.
if i >= n_added_dims:
            # We check that the corresponding val dimension was
            # not a broadcasted dimension.
if (
val.type.ndim > (i - n_added_dims)
and val.type.broadcastable[i - n_added_dims]
):
val_slices.append(slice(None))
else:
val_slices.append(sl)
csl, _ = get_canonical_form_slice(sl, dim)
if type(csl) is not slice:
# That dimension is removed.
pass
else:
nw_dim = csl.stop - csl.start
if csl.step != 1:
                # Do not add ceil_intdiv() graphs to the graph
                # when this is not needed, as it prevents detecting the
                # correct broadcast pattern.
nw_dim = tt.ceil_intdiv(nw_dim, csl.step)
nw_dims += [nw_dim]
nw_val = val[tuple(val_slices)]
nw_dims += dims[len(slices) :]
if nw_val.ndim > len(nw_dims):
return False
rval = alloc(nw_val, *nw_dims)
if type(rval) not in (list, tuple):
rval = [rval]
if rval[0].type != node.outputs[0].type:
        # It happens that make_node() isn't able to infer the same pattern.
        # We know it is safe, so fix that.
rval[0] = tt.patternbroadcast(rval[0], node.outputs[0].broadcastable)
return rval
@register_canonicalize
@register_stabilize
@register_specialize
@local_optimizer([Subtensor])
def local_subtensor_of_dot(node):
"""
This optimization translates T.dot(A, B)[idxs] into T.dot(A[idxs_a], B[idxs_b]),
where idxs_a and idxs_b are defined appropriately.
idxs_a is the first A.ndim-1 entries of idxs,
and idxs_b is the remaining entries of idxs (if any),
modified to skip the second-to-last dimension of B
(because dot sums over this dimension).
"""
if not isinstance(node.op, Subtensor):
return
if not node.inputs[0].owner or not isinstance(node.inputs[0].owner.op, Dot):
return
    # If other nodes use the output of the dot,
    # we don't want to compute the sub part twice.
if len(node.inputs[0].clients) > 1:
return
a = node.inputs[0].owner.inputs[0]
b = node.inputs[0].owner.inputs[1]
idx_list = get_idx_list(node.inputs, node.op.idx_list)
num_a_indices = min(a.ndim - 1, len(idx_list))
a_indices = idx_list[:num_a_indices]
b_indices = idx_list[num_a_indices:]
# This is necessary because np.dot sums the last index of a with the second to last of b
# so we want to skip the second-to-last index into b.
# This wasn't necessary for a, because we just omitted the last index.
# We skip this if b.ndim = 1, since then we just want b_sub = b, not b_sub = b[:]
# (dot also handles b.ndim < 2 as a special case)
if b.ndim > 1 and len(b_indices) >= b.ndim - 1:
b_indices = (
b_indices[: b.ndim - 2]
+ (slice(None, None, None),)
+ b_indices[b.ndim - 2 :]
)
a_sub = a.__getitem__(tuple(a_indices))
b_sub = b.__getitem__(tuple(b_indices)) if b_indices else b
# Copy over previous output stacktrace to a_sub and b_sub,
# because an error in the subtensor operation (e.g. an index error)
# on either a or b must correspond to an error in the
# subtensor operation on their dot product.
copy_stack_trace(node.outputs[0], [a_sub, b_sub])
# Copy over previous output stacktrace and previous dot product stacktrace,
# because an error here may correspond to an either in either the original
# dot product, or in the dot product after the subtensor operation.
r = tt.dot(a_sub, b_sub)
copy_stack_trace([node.outputs[0], node.inputs[0]], r)
return [r]
@register_canonicalize
@local_optimizer([add])
def local_IncSubtensor_serialize(node):
"""
When using Subtensor, gradient graphs can be ugly.
If we ask for grad(f(a[0]), a), we are going to get something like
IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])
This might be ugly, but at least it's as fast as you could want.
If we ask for grad(f(a[0], a[1], a[2]), a), it's much worse...
Elemwise{Add}
IncSubtensor(Elemwise{second}(a, 0), g(f(a[0])), [0])
IncSubtensor(Elemwise{second}(a, 0), g(f(a[1])), [1])
IncSubtensor(Elemwise{second}(a, 0), g(f(a[2])), [2])
This is much worse because this time we have to produce 3 matrices
the size of 'a', just so we can add them together.
    This optimization rearranges IncSubtensors that all work on the same
initial argument (here, Elemwise{second}(a,0)) into a chain. The
advantage of the chain structure is that each one can be optimized
later in the pipeline to operate inplace.
Ideally, the op will do something like this:
#
# add(x, incsubtensor(b, c), incsubtensor(b, d))
# -> incsubtensor(incsubtensor(add(x,b,b), c), d)
"""
def movable(i):
# Return True iff this is a incsubtensor that we can move
return (
i.owner
and isinstance(
i.owner.op,
(
IncSubtensor,
AdvancedIncSubtensor1,
AdvancedIncSubtensor,
),
)
and i.type == o_type
and len(i.clients) == 1
and not i.owner.op.set_instead_of_inc
)
if node.op == add:
o_type = node.outputs[0].type
movable_inputs = [i for i in node.inputs if movable(i)]
if movable_inputs:
new_inputs = [i for i in node.inputs if not movable(i)] + [
mi.owner.inputs[0] for mi in movable_inputs
]
            if len(new_inputs) == 1:
new_add = new_inputs[0]
else:
new_add = add(*new_inputs)
# Copy over stacktrace from original output, as an error
# (e.g. an index error) in this add operation should
# correspond to an error in the original add operation.
copy_stack_trace(node.outputs[0], new_add)
# stack up the new incsubtensors
tip = new_add
for mi in movable_inputs:
assert tip.type == o_type
assert tip.type == mi.owner.inputs[0].type
tip = mi.owner.op(tip, *mi.owner.inputs[1:])
# Copy over stacktrace from outputs of the original
# "movable" operation to the new operation.
copy_stack_trace(node.outputs + mi.owner.outputs, tip)
return [tip]
# print incsub_inputs, [id(i.owner.inputs[0]) for i in incsub_inputs]
# We register it in a TopoOptimizer inside the canonizer EQ optimizer.
# Otherwise in some cases it was making the EQ optimizer use 45 passes.
# In the TopoOptimizer, the EQ optimizer only uses 5 passes.
compile.optdb.register(
"pre_local_IncSubtensor_serialize",
in2out(local_IncSubtensor_serialize),
# Just before canonizer
0.99,
"fast_run",
)
# after priority 50 Destructive inplace operations
# gemm is the first one now, at priority 70
@local_optimizer([IncSubtensor], inplace=True)
def local_inplace_setsubtensor(node):
"""
    Also works for GpuIncSubtensor.
"""
if isinstance(node.op, IncSubtensor) and not node.op.inplace:
dta = node.op.destroyhandler_tolerate_aliased
new_op = node.op.__class__(
node.op.idx_list,
inplace=True,
set_instead_of_inc=node.op.set_instead_of_inc,
destroyhandler_tolerate_aliased=dta,
)
new_node = new_op(*node.inputs)
val = getattr(node.outputs[0].tag, "nan_guard_mode_check", True)
new_node.tag.nan_guard_mode_check = val
# Copy stacktrace from original outputs to new outputs.
# This is sensible, because the new operation is the
# same as the old one, but now with different attributes.
copy_stack_trace(node.outputs, new_node)
return [new_node]
return False
compile.optdb.register(
"local_inplace_setsubtensor",
TopoOptimizer(
local_inplace_setsubtensor, failure_callback=TopoOptimizer.warn_inplace
),
60,
"fast_run",
"inplace",
) # DEBUG
@local_optimizer([AdvancedIncSubtensor1], inplace=True)
def local_inplace_incsubtensor1(node):
"""
    Also works for GpuAdvancedIncSubtensor1.
"""
if isinstance(node.op, AdvancedIncSubtensor1) and not node.op.inplace:
new_op = node.op.clone_inplace()
new_node = new_op(*node.inputs)
# Copy stacktrace from original outputs to new outputs.
# This is sensible, because the new operation is the
# same as the old one, but now with different attributes.
copy_stack_trace(node.outputs, new_node)
return [new_node]
return False
compile.optdb.register(
"local_inplace_incsubtensor1",
TopoOptimizer(
local_inplace_incsubtensor1, failure_callback=TopoOptimizer.warn_inplace
),
60,
"fast_run",
"inplace",
) # DEBUG
# Register old name
@register_canonicalize("local_incsubtensor_of_allocs")
@register_stabilize("local_incsubtensor_of_allocs")
@local_optimizer([IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1])
def local_incsubtensor_of_zeros(node):
"""
IncSubtensor(x, zeros, idx) -> x
"""
if (
isinstance(node.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1))
and not node.op.set_instead_of_inc
):
x = node.inputs[0]
y = node.inputs[1]
try:
# Don't use only_process_constants=True. We need to
# investigate Alloc of 0s but with non constant shape.
if get_scalar_constant_value(y, elemwise=False) == 0:
# No need to copy over the stacktrace,
# because x should already have a stacktrace
return [x]
except NotScalarConstantError:
return
@register_canonicalize
@register_specialize
@local_optimizer([IncSubtensor])
def local_incsubtensor_of_zeros_to_setsubtensor(node):
"""
IncSubtensor(zeros, x, ...) -> SetSubtensor(zeros, x, ...)
"""
if isinstance(node.op, (IncSubtensor)) and not node.op.set_instead_of_inc:
x = node.inputs[0]
if isinstance(x, tt.Constant) and not np.any(x.data):
return [
IncSubtensor(
node.op.idx_list,
node.op.inplace,
set_instead_of_inc=True,
destroyhandler_tolerate_aliased=node.op.destroyhandler_tolerate_aliased,
)(*node.inputs)
]
@register_canonicalize("local_setsubtensor_of_allocs")
@register_stabilize("local_setsubtensor_of_allocs")
@local_optimizer([IncSubtensor])
def local_setsubtensor_of_constants(node):
"""
SetSubtensor(x, x[idx], idx) -> x
when x is constant or alloc.
"""
if isinstance(node.op, IncSubtensor) and node.op.set_instead_of_inc:
x = node.inputs[0]
y = node.inputs[1]
# Don't use only_process_constants=True. We need to
# investigate Alloc of 0s but with non constant shape.
try:
replace_x = get_scalar_constant_value(x, elemwise=False)
except NotScalarConstantError:
return
try:
replace_y = get_scalar_constant_value(y, elemwise=False)
except NotScalarConstantError:
return
if replace_x == replace_y:
# No need to copy over the stacktrace,
# because x should already have a stacktrace
return [x]
else:
return False
@register_canonicalize
@register_stabilize
@local_optimizer([AdvancedSubtensor1])
def local_adv_sub1_adv_inc_sub1(node):
"""Optimize the possible AdvSub1(AdvSetSub1(...), ...).
AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y
Notes
-----
    This opt adds an Assert op. Otherwise, it would remove shape and
    index errors. If you want to get rid of them, see the
    :ref:`unsafe_optimization` section.
WARNING:
A previous version of this optimization also matched
AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y
This is incorrect when there are duplicate indices.
The current version warns the user about potential past issues.
"""
if not isinstance(node.op, AdvancedSubtensor1):
return
inp = node.inputs[0]
if not inp.owner or not isinstance(inp.owner.op, AdvancedIncSubtensor1):
return
idx = node.inputs[1]
idx2 = inp.owner.inputs[2]
x = inp.owner.inputs[0]
y = inp.owner.inputs[1]
if idx is not idx2:
return
if (
not inp.owner.op.set_instead_of_inc
and
# Don't use only_process_constants=True. We need to
# investigate Alloc of 0s but with non constant shape.
tt.extract_constant(x, elemwise=False) != 0
):
return
if not inp.owner.op.set_instead_of_inc:
if config.warn.inc_subtensor1_opt:
            warnings.warn(
"Your current code is fine, but Theano versions "
"between 0.7rc1 and 0.10 (or development versions "
"between Nov. 2014 and May 2017) "
"might have given incorrect results. This graph has "
"following pattern: inc_subtensor(zeros[idx], x)[idx], "
"where idx is an array of integers. This used to be "
'optimized to "x", which is incorrect if there are '
"duplicated indices in idx. "
"To disable this warning, set the Theano flag "
"warn.inc_subtensor1_opt to False."
)
return
cond = [tt.all(tt.and_(tt.lt(idx, x.shape[0]), tt.ge(idx, -x.shape[0])))]
if not node.fgraph.shape_feature.same_shape(idx, y, 0, 0):
cond.append(tt.eq(idx.shape[0], y.shape[0]))
r = Assert(
"Bad indexing or shapes in a AdvancedIncSubtensor1 " "that was optimized away"
)(y, *cond)
copy_stack_trace(y, r)
if r.dtype == node.outputs[0].dtype:
return [r]
    # It is possible that y is upcast or downcast to x.dtype.
    # In all cases, since we set or add with 0, we can just cast y.
r2 = tt.cast(r, node.outputs[0].dtype)
# Copy over stacktrace from before casting, since
# we don't expect problems in the casting operation,
# and any problems in the indexing would have been spotted above.
copy_stack_trace(r, r2)
return [r2]
@register_specialize
@register_stabilize
@register_canonicalize
@register_useless
@local_optimizer([IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1])
def local_useless_inc_subtensor_alloc(node):
"""
Replaces an [Advanced]IncSubtensor[1], whose increment is an `alloc` of
a fully or partially broadcastable variable, by one that skips the
intermediate `alloc` where possible.
"""
if isinstance(node.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)):
x = node.inputs[0]
y = node.inputs[1]
i = node.inputs[2:]
if y.owner is not None and isinstance(y.owner.op, Alloc):
# `z` is the input of the Alloc op, i.e. T.alloc(z, <shape>)
z = y.owner.inputs[0]
try:
shape_feature = node.fgraph.shape_feature
except AttributeError:
# The shape feature may not be available in some mode, but we
# need it for this optimization, so don't continue.
return False
shape_of = shape_feature.shape_of
same_shape = shape_feature.same_shape
# Get the subtensor of `x` indexed by `i` in order to compare
# shapes later.
if isinstance(node.op, IncSubtensor):
xi = Subtensor(node.op.idx_list)(x, *i)
elif isinstance(node.op, AdvancedIncSubtensor):
xi = advanced_subtensor(x, *i)
elif isinstance(node.op, AdvancedIncSubtensor1):
xi = advanced_subtensor1(x, *i)
else:
raise Exception("Should never happen!")
reason = "local_useless_incsubtensor_alloc"
# Add `xi` to the shape feature `fgraph`. This is important for
# shape inference later because the variable must be part of the
# function graph in order to call `same_shape` on it.
if xi not in shape_of:
shape_feature.on_import(node.fgraph, xi.owner, "%s: add `xi`" % reason)
# `xi` may have more dimensions than `y` since the subtensor ops
# do automatic broadcasting of the increment internally. Thus, we
# need to make the leading implicitly broadcasted dimensions
# explicit for shape comparison later.
if xi.ndim > y.ndim:
y = tt.shape_padleft(y, xi.ndim - y.ndim)
if y not in shape_of:
shape_feature.on_import(
node.fgraph, y.owner, "%s: add `y`" % reason
)
# Build `z_broad` explicitly to include extra implicit dimensions.
z_broad = (True,) * (xi.ndim - z.ndim) + z.broadcastable
cond = [
# The shapes of `y` and `xi` must either agree or `y` may
# also have shape equal to 1 which may be treated as a
# broadcastable dimension by the subtensor op.
tt.or_(tt.eq(y.shape[k], 1), tt.eq(y.shape[k], xi.shape[k]))
# Loop over all dimensions.
for k in range(xi.ndim)
# We need to check the above shapes, if
# * the pre-alloc increment `z` is broadcastable in
# dimension `k` (if it isn't, then the shapes of `z` and
# `y` are the same by the definition of the `Alloc` op in
# this dimension and replacing `y` by `z` will not hide a
# shape error), and
# * `xi` and `y` do not have the same shape in dimension
# `k` or we cannot infer the shape statically (if the
# shapes of `xi` and `y` are not the same, then replacing
# `y` by `z` will hide the shape error of `y`), and
# * the shape of `y` is not equal to 1 or we cannot infer
# the shape statically (if the shape of `y` is equal to
# 1, then `y` is broadcasted by the inc_subtensor op
# internally, so the shapes of `xi` and `y` do not need
# to match in dimension `k`; else we need to check at
# runtime that the shape of `y` is either 1 or the same
# as `xi` or otherwise replacing `y` by `z` will hide a
# shape error).
if (
z_broad[k]
and not same_shape(xi, y, dim_x=k, dim_y=k)
and shape_of[y][k] != 1
)
]
if len(cond) > 0:
msg = "`x[i]` and `y` do not have the same shape."
z = Assert(msg)(z, *cond)
r = node.op(x, z, *i)
# Copy over stacktrace from previous output, since
# we don't expect problems when removing the intermediate
# alloc operation and so we still want to point at the line
# of the inc_subtensor operation.
copy_stack_trace(node.outputs, r)
return [r]
####################
# Rebroadcast opts #
####################
@register_useless
@register_canonicalize
@register_specialize
@local_optimizer([Rebroadcast])
def local_useless_rebroadcast(node):
"""
    Remove Rebroadcast if it does not actually change the broadcasting pattern.
"""
if isinstance(node.op, Rebroadcast):
x = node.inputs[0]
if np.all(x.broadcastable == node.outputs[0].broadcastable):
# No broadcastable flag was modified
# No need to copy over stack trace,
# because x should already have a stack trace.
return [x]
else:
# Keep the flags that modify something
new_axis = {}
for dim, bc in list(node.op.axis.items()):
if x.broadcastable[dim] != bc:
new_axis[dim] = bc
if new_axis == node.op.axis:
# All flags are useful
return
else:
r = Rebroadcast(*list(new_axis.items()))(x)
# Copy over stacktrace from previous output
copy_stack_trace(node.outputs, r)
return [r]
@register_canonicalize
@register_specialize
@local_optimizer([Rebroadcast])
def local_rebroadcast_lift(node):
"""
Lifts Rebroadcast through unary Elemwise operations,
and merges consecutive Rebroadcasts.
Rebroadcast(Elemwise(x)) => Elemwise(Rebroadcast(x))
Rebroadcast(Rebroadcast(x)) => Rebroadcast(x)
"""
op = node.op
if not isinstance(op, Rebroadcast):
return False
input = node.inputs[0]
inode = input.owner
if inode and isinstance(inode.op, Elemwise) and len(inode.inputs) == 1:
# It may happen that `input` has no client because this optimization
# is called from `apply_rebroadcast_opt`, which in particular is used
# by the `unbroadcast` function before we are in the actual function
# compilation phase.
if hasattr(input, "clients") and len(input.clients) == 1:
rebroadcasted = Rebroadcast(*list(op.axis.items()))(inode.inputs[0])
# Copy over stacktrace from previous output (after rebroadcasting)
# to new output, because an error in the new graph right after
# rebroadcasting must have been caused by the previous rebroadcasting.
copy_stack_trace(node.outputs, rebroadcasted)
rval = inode.op.make_node(rebroadcasted).outputs
# Copy over stacktrace from previous output (after rebroadcasting)
# and input (after elemwise operation) to new output, because an
# error in the new graph could have been caused by either of the
# two ops.
copy_stack_trace(node.outputs + node.inputs, rval)
return rval
if inode and isinstance(inode.op, Rebroadcast):
# the "axis" specification in the outer Rebroadcast overrides
# the axis of the inner one
axis = inode.op.axis.copy()
axis.update(op.axis)
iinput = inode.inputs[0]
rval = [Rebroadcast(*list(axis.items()))(iinput)]
# Copy over stacktrace from previous output (after second rebroadcast)
# and from previous input (after first rebroadcast op) because an error in
# the new graph could have been caused by either of the two
# rebroadcast ops.
copy_stack_trace(node.outputs + node.inputs, rval)
return rval
def apply_rebroadcast_opt(rval):
"""
    Apply the optimizations local_useless_rebroadcast and
    local_rebroadcast_lift as many times as required.
Parameters
----------
rval: a Variable
Returns
-------
A Variable (the same if no optimization can be applied)
"""
changed = True
while changed and rval.owner:
changed = False
rval2 = local_useless_rebroadcast.transform(rval.owner)
if rval2:
assert len(rval2) == 1
rval = rval2[0]
changed = True
if rval.owner:
rval2 = local_rebroadcast_lift.transform(rval.owner)
if rval2:
assert len(rval2) == 1
rval = rval2[0]
changed = True
return rval
#############
# Join opts #
#############
@register_specialize
@register_canonicalize
@register_useless
@local_optimizer([Join])
def local_join_1(node):
"""Join(i, x) => x
Remove Join() when only one element is joined.
"""
if not isinstance(node.op, Join):
return
tensors = node.inputs[1:]
if len(tensors) == 1:
# We don't need to copy over any stacktrace here, because the
# input variable should already have its own stacktrace.
return [tensors[0]]
# TODO: merge in local_useless_join
@register_useless
@register_specialize
@register_canonicalize
@local_optimizer([Join])
def local_join_empty(node):
"""Join(i, x, y, empty) => Join(i, x, y)
Remove empty inputs to joins. The empty inputs can be anywhere.
"""
if not isinstance(node.op, Join):
return
new_inputs = []
try:
join_idx = get_scalar_constant_value(
node.inputs[0], only_process_constants=True
)
except NotScalarConstantError:
return
for idx in range(1, len(node.inputs)):
inp = node.inputs[idx]
        # We cannot use size == 0, as this can change the shape from (3, 0)
        # to (2, 0). This triggers a DebugMode error. This happens with
        # stack(..., []), as this adds a dimshuffle on [], which adds a
        # dimension with shape 1.
if isinstance(inp, theano.Constant) and inp.data.shape[join_idx] == 0:
continue
new_inputs.append(inp)
if len(new_inputs) < len(node.inputs) - 1:
if len(new_inputs) == 0:
            # T.join does not work in that case;
            # constant folding will take care of this case.
return
ret = tt.join(node.inputs[0], *new_inputs)
o = node.outputs[0]
if ret.dtype != o.dtype:
# Join can upcast some inputs
return
# Copy over stacktrace from previous output (after join op)
# to new output, because an error in the new op must be caused
# by an error in the old join op.
copy_stack_trace(node.outputs, ret)
if ret.type != o.type:
assert ret.dtype == o.dtype
assert ret.ndim == o.ndim
ret = tt.patternbroadcast(ret, node.outputs[0].broadcastable)
# Copy over stacktrace from previous output
# (after patternbroadcast op) for same reasons as before.
copy_stack_trace(node.outputs, ret)
return [ret]
@register_specialize
@register_canonicalize
@register_useless
@local_optimizer([Join])
def local_join_make_vector(node):
"""Join(0, make_vector1, make_vector2, ...) => Join(0, make_vector12, ...)
    Merge MakeVector inputs to Join. This can make the join completely
    disappear with the local_join_1 opt.
"""
if not isinstance(node.op, Join) or node.outputs[0].ndim != 1:
return
new_inputs = [node.inputs[1]]
for idx in range(2, len(node.inputs)):
inp = node.inputs[idx]
if (
inp.owner
and isinstance(inp.owner.op, MakeVector)
and new_inputs[-1].owner
and isinstance(new_inputs[-1].owner.op, MakeVector)
and
# MakeVector have a dtype parameter
inp.owner.op == new_inputs[-1].owner.op
):
inps = new_inputs[-1].owner.inputs + inp.owner.inputs
new_inputs[-1] = inp.owner.op(*inps)
# Copy over stacktrace from previous output (after join op)
# to new intermediate output, because an error in the intermediate
# op must be caused by an error in the old join op.
copy_stack_trace(node.outputs, new_inputs[-1])
else:
new_inputs.append(inp)
if len(new_inputs) < len(node.inputs) - 1:
ret = tt.join(node.inputs[0], *new_inputs)
# Copy over stacktrace from previous output (after join op)
# to new output, because an error in the new op must be caused
# by an error in the old join op.
copy_stack_trace(node.outputs, ret)
return [ret]
#################
# speed/memory #
#################
@register_canonicalize
@register_specialize
@local_optimizer([Sum])
def local_sumsqr2dot(node):
"""
This optimization detects T.sqr( W.dimshuffle('x',0,1) * G.dimshuffle(0,'x',1) ).sum(axis=(1,2))
and converts this to T.dot(T.sqr(G), T.sqr(W).sum(axis=0)).
"""
if (
isinstance(node.op, Sum)
and isinstance(node.op.scalar_op, ts.Add)
and node.op.axis == (1, 2)
):
in1 = node.inputs[0]
out = node.outputs[0]
if (
in1.owner
and isinstance(in1.owner.op, Elemwise)
and isinstance(in1.owner.op.scalar_op, ts.Sqr)
):
in_sqr = in1.owner.inputs[0]
if (
in_sqr.owner
and isinstance(in_sqr.owner.op, Elemwise)
and isinstance(in_sqr.owner.op.scalar_op, ts.Mul)
and len(in_sqr.owner.inputs) == 2
):
in_mul1, in_mul2 = in_sqr.owner.inputs
if (
isinstance(in_mul1.owner.op, tt.elemwise.DimShuffle)
and in_mul1.owner.op.new_order == ("x", 0, 1)
and isinstance(in_mul2.owner.op, tt.elemwise.DimShuffle)
and in_mul2.owner.op.new_order == (0, "x", 1)
):
W = in_mul1.owner.inputs[0]
G = in_mul2.owner.inputs[0]
new_out = tt.dot(tt.sqr(G), tt.sqr(W).sum(axis=0))
if new_out.dtype != out.dtype:
new_out = tt.cast(new_out, dtype=out.dtype)
return [new_out]
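# Illustrative sketch (editor's addition, not part of the original module):
# the exact pattern `local_sumsqr2dot` looks for, built from user code.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> W = tt.matrix("W")
#   >>> G = tt.matrix("G")
#   >>> y = tt.sqr(W.dimshuffle("x", 0, 1) * G.dimshuffle(0, "x", 1)).sum(axis=(1, 2))
#   >>> f = theano.function([W, G], y)
#   >>> # The optimized graph computes tt.dot(tt.sqr(G), tt.sqr(W).sum(axis=0)),
#   >>> # avoiding the large 3-d intermediate result.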
#################
# Exp stability #
#################
@register_stabilize
@register_specialize
@register_canonicalize
@local_optimizer([Elemwise])
def local_expm1(node):
"""
This optimization detects exp(a)-1 and converts this to expm1(a).
"""
if isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, ts.Sub):
in1, in2 = node.inputs
out = node.outputs[0]
if (
in1.owner
and isinstance(in1.owner.op, Elemwise)
and isinstance(in1.owner.op.scalar_op, ts.Exp)
and tt.extract_constant(in2, only_process_constants=False) == 1
):
in11 = in1.owner.inputs[0]
new_out = tt.expm1(in11)
if new_out.dtype != out.dtype:
new_out = tt.cast(new_out, dtype=out.dtype)
if new_out.type != out.type:
return
return [new_out]
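# Illustrative sketch (editor's addition, not part of the original module):
# `local_expm1` replacing exp(a) - 1 with the numerically safer expm1(a).
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> a = tt.vector("a")
#   >>> f = theano.function([a], tt.exp(a) - 1)
#   >>> # The compiled graph should use tt.expm1(a), which stays accurate
#   >>> # for small values of `a` where exp(a) - 1 loses precision.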
###############
# Switch opts #
###############
@register_useless("local_remove_switch_const_cond")
@register_canonicalize("fast_compile", "local_remove_switch_const_cond")
@register_specialize
@local_optimizer([Elemwise])
def local_useless_switch(node):
"""
This optimization makes the following changes in the graph:
T.switch(cond,left,right) -->
if cond is constant and cond == 0: right
if cond is constant and cond != 0: left
if left is right -> left
T.switch(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)
"""
if isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, ts.Switch):
cond = tt.extract_constant(node.inputs[0], only_process_constants=True)
if (type(cond) is np.ndarray and cond.ndim == 0) or isinstance(cond, np.number):
if cond == 0:
correct_out = node.inputs[2]
else:
correct_out = node.inputs[1]
if correct_out.ndim != node.outputs[0].ndim:
# TODO: broadcast?
return False
if correct_out.dtype != node.outputs[0].dtype:
out = tt.cast(correct_out, node.outputs[0].dtype)
else:
out = correct_out
if out.type.broadcastable != node.outputs[0].type.broadcastable:
                # We need to copy data to the new dimensions during execution.
                # We should not depend on node.outputs, as this would make
                # the new node depend on the old one that will get optimized
                # again, so this would create a cycle.
shps = []
for idx, (b1, b2), in enumerate(
zip(out.type.broadcastable, node.outputs[0].type.broadcastable)
):
if b1 == b2:
shps.append(out.shape[idx])
elif not node.inputs[1].type.broadcastable[idx]:
shps.append(node.inputs[1].shape[idx])
else:
shps.append(node.inputs[2].shape[idx])
out = alloc(out, *shps)
else:
out = out
# Copy over stacktrace from selected output to new output
copy_stack_trace(node.outputs + correct_out, out)
return [out]
# if left is right -> left
if node.inputs[1] is node.inputs[2]:
# Note: No need to copy over stacktrace, because the input node
# already has its own stacktrace
if cond.type == node.inputs[1].type:
return [node.inputs[1]]
ret = fill(cond, node.inputs[1])
# Copy over stacktrace from switch output and correct branch
copy_stack_trace(node.outputs + node.inputs[1], ret)
return [ret]
# This case happens with scan.
# Elemwise{switch}(le(shape_i{id}(X), 0), 0, shape_i{id}(X)) -> shape_i{id}(X)
left = node.inputs[1]
right = node.inputs[2]
cond_var = node.inputs[0]
if (
cond_var.owner
and isinstance(cond_var.owner.op, Elemwise)
and isinstance(cond_var.owner.op.scalar_op, ts.LE)
and cond_var.owner.inputs[0].owner
and isinstance(cond_var.owner.inputs[0].owner.op, Shape_i)
and tt.extract_constant(
cond_var.owner.inputs[1], only_process_constants=True
)
== 0
and tt.extract_constant(left, only_process_constants=True) == 0
and right is cond_var.owner.inputs[0]
):
assert right.type == node.outputs[0].type
# No need to copy over stacktrace, because the right input node
# already has its own stacktrace
return [right]
return False
return False
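# Illustrative sketch (editor's addition, not part of the original module):
# the constant-condition case handled by `local_useless_switch`.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x = tt.vector("x")
#   >>> y = tt.vector("y")
#   >>> z = tt.switch(0, x, y)          # condition is a constant
#   >>> f = theano.function([x, y], z)
#   >>> # With a constant 0 condition the optimized graph should simply
#   >>> # return `y`; a nonzero constant would select `x` instead.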
@register_specialize
@register_canonicalize
@local_optimizer([mul])
def local_mul_switch_sink(node):
"""
This optimization makes the following changes in the graph:
T.mul(A,T.switch(cond,0,iff),B) --> T.switch(cond,0,T.mul(A,B,iff))
T.mul(A,T.switch(cond,ift,0),B) --> T.switch(cond,T.mul(A,B,ift),0)
A and B being several (or none) symbolic variables.
This is useful because A and B may not be numerically stable and give
NaN or inf values for cases where the switch returns 0.
With this optimization T.grad(T.switch(...)) has the right behavior.
Examples
--------
x -> f(x)
x -> g(x)
y = T.switch(cond,f(x),g(x))
**without the optimization
T.grad(y,x) -> grad(f(x),x) * grad(y,f(x)) + grad(g(x),x) * grad(y,g(x))
**with the optimization
T.grad(y,x) -> switch(cond,grad(f(x),x), 0) + switch(cond,0,grad(g(x),x))
This will be particularly useful for the lazyif because we skip
an entire part of the graph.
"""
if node.op != mul:
return False
for idx, i in enumerate(node.inputs):
if i.owner and i.owner.op == tt.switch:
switch = i.owner
try:
if (
get_scalar_constant_value(
switch.inputs[1], only_process_constants=True
)
== 0.0
):
listmul = node.inputs[:idx] + node.inputs[idx + 1 :]
fmul = mul(*(listmul + [switch.inputs[2]]))
# Copy over stacktrace for elementwise multiplication op
# from previous elementwise multiplication op.
# An error in the multiplication (e.g. errors due to
# inconsistent shapes), will point to the
# multiplication op.
copy_stack_trace(node.outputs, fmul)
fct = [tt.switch(switch.inputs[0], 0, fmul)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise multiplication op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
try:
if (
get_scalar_constant_value(
switch.inputs[2], only_process_constants=True
)
== 0.0
):
listmul = node.inputs[:idx] + node.inputs[idx + 1 :]
fmul = mul(*(listmul + [switch.inputs[1]]))
# Copy over stacktrace for elementwise multiplication op
# from previous elementwise multiplication op.
# An error in the multiplication (e.g. errors due to
# inconsistent shapes), will point to the
# multiplication op.
copy_stack_trace(node.outputs, fmul)
fct = [tt.switch(switch.inputs[0], fmul, 0)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise multiplication op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
return False
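# Illustrative sketch (editor's addition, not part of the original module):
# why `local_mul_switch_sink` matters for gradients of guarded expressions.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x = tt.vector("x")
#   >>> y = tt.switch(x > 0, tt.log(x), 0)   # log(x) is NaN/-inf for x <= 0
#   >>> g = tt.grad(y.sum(), x)
#   >>> f = theano.function([x], g)
#   >>> # Sinking the multiplication inside the switch keeps the unstable
#   >>> # branch from contaminating the gradient where the switch selects 0.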
@register_canonicalize
@local_optimizer([true_div, int_div])
def local_div_switch_sink(node):
"""
This optimization makes the following changes in the graph:
T.div(T.switch(cond,0,iff),A) --> T.switch(cond,0,T.div(iff,A))
T.div(T.switch(cond,ift,0),A) --> T.switch(cond,T.div(ift,A),0)
A being a symbolic variable.
This is useful because A may not be numerically stable and give
NaN or inf values for cases where the switch returns 0.
See local_mul_switch_sink for more details.
"""
if node.op != true_div and node.op != int_div:
return False
op = node.op
if node.inputs[0].owner and node.inputs[0].owner.op == tt.switch:
switch = node.inputs[0].owner
try:
if (
get_scalar_constant_value(switch.inputs[1], only_process_constants=True)
== 0.0
):
fdiv = op(switch.inputs[2], node.inputs[1])
# Copy over stacktrace for elementwise division op
# from previous elementwise multiplication op.
# An error in the division (e.g. errors due to
# inconsistent shapes or division by zero),
# will point to the new division op.
copy_stack_trace(node.outputs, fdiv)
fct = [tt.switch(switch.inputs[0], 0, fdiv)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise division op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
try:
if (
get_scalar_constant_value(switch.inputs[2], only_process_constants=True)
== 0.0
):
fdiv = op(switch.inputs[1], node.inputs[1])
# Copy over stacktrace for elementwise division op
# from previous elementwise multiplication op.
# An error in the division (e.g. errors due to
# inconsistent shapes or division by zero),
# will point to the new division op.
copy_stack_trace(node.outputs, fdiv)
fct = [tt.switch(switch.inputs[0], fdiv, 0)]
fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
# Copy over stacktrace for switch op from both previous
# elementwise division op and previous switch op,
# because an error in this part can be caused by either
# of the two previous ops.
copy_stack_trace(node.outputs + switch.outputs, fct)
return fct
except NotScalarConstantError:
pass
return False
# Merge add/sub/mul/div/minimum/maximum/... of switches sharing the same
# condition, to enable further simplification of their branches
# Example: switch(c, a, b) + switch(c, x, y) -> switch(c, a+x, b+y)
@register_canonicalize
@local_optimizer([Elemwise])
def local_merge_switch_same_cond(node):
# node must be binary elemwise or add or mul
if not isinstance(node.op, Elemwise) or not isinstance(
node.op.scalar_op, (ts.BinaryScalarOp, ts.Add, ts.Mul)
):
return
# all inputs must be switch
if not all(
s.owner
and isinstance(s.owner.op, Elemwise)
and isinstance(s.owner.op.scalar_op, ts.Switch)
for s in node.inputs
):
return
# all switch conditions must be the same
cond = node.inputs[0].owner.inputs[0]
if not all(s.owner.inputs[0] is cond for s in node.inputs[1:]):
return
# pull out switch
return [
tt.switch(
cond,
node.op(*[s.owner.inputs[1] for s in node.inputs]),
node.op(*[s.owner.inputs[2] for s in node.inputs]),
)
]
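# Illustrative sketch (editor's addition, not part of the original module):
# two switches sharing a condition, as merged by
# `local_merge_switch_same_cond` (after the usual graph merging).
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> c, a, b, x, y = (tt.vector(name) for name in "cabxy")
#   >>> z = tt.switch(c, a, b) + tt.switch(c, x, y)
#   >>> f = theano.function([c, a, b, x, y], z)
#   >>> # The optimized graph should contain a single switch(c, a + x, b + y).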
#############
# Tile Opts #
#############
@register_useless
@register_canonicalize
@register_stabilize
@local_optimizer([Tile])
def local_useless_tile(node):
"""Tile(x, (1,)*N) -> x
    This is a useless Tile: (1,)*N just means a vector with all elements
    equal to 1.
"""
if isinstance(node.op, Tile):
try:
a = tt.get_scalar_constant_value(
node.inputs[1], only_process_constants=True
)
if a == 1:
try:
l = tt.get_vector_length(node.inputs[1])
if l == node.inputs[0].ndim:
# No need to copy over any stacktrace as previous
# input variable already has a stacktrace
return [node.inputs[0]]
elif l < node.inputs[0].ndim:
                        # The Op doesn't support that case, so we can't
                        # implement the opt and test it.
return
return [node.inputs[0]]
else:
                        # The Op doesn't support that case, so we can't
                        # implement the opt and test it.
return
x_nd = node.inputs[0].ndim
broad = ["x"] * (l - x_nd) + range(x_nd)
ret = node.inputs[0].dimshuffle(broad)
# Copy over stacktrace from previous output node,
# and from node before tiling operation.
copy_stack_trace(node.outputs + node.inputs[0], ret)
return [ret]
except ValueError:
return
except NotScalarConstantError:
return
##############
# Split Opts #
##############
@register_useless
@register_canonicalize
@register_specialize
@local_optimizer([Split])
def local_useless_split(node):
"""Split{n_splits=1}(x, y) -> x
Remove Split with only 1 split.
"""
if isinstance(node.op, Split):
if node.op.len_splits == 1:
x, axis, splits = node.inputs
out = assert_op(x, tt.eq(splits.shape[0], 1))
# Copy over stacktrace from previous output node.
copy_stack_trace(node.outputs, out)
out2 = assert_op(out, tt.eq(x.shape[axis], splits[0]))
# Copy over stacktrace from previous output node.
copy_stack_trace(out, out2)
return [out2]
################
# Flatten Opts #
################
@register_canonicalize
@register_stabilize
@local_optimizer([Flatten])
def local_flatten_lift(node):
"""
Flatten(UnaryElemwise(x)) -> UnaryElemwise(Flatten(x))
This optimization is needed by optimization
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a flatten.
"""
if (
isinstance(node.op, Flatten)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Elemwise)
and len(node.inputs[0].owner.inputs) == 1
):
f = node.op(node.inputs[0].owner.inputs[0])
# Copy over stacktrace from previous output node (flatten op),
# since this is the op which may cause an error for f.
copy_stack_trace(node.outputs, f)
e = node.inputs[0].owner.op(f)
# Copy over stacktrace from previous output node and from unary
# elementwise output node since if there was an error, it would
# probably have come from that operation.
copy_stack_trace(node.outputs + [node.inputs[0]], e)
return [e]
##################
# Reshape opts #
##################
def local_reshape_chain(op):
@local_optimizer([op])
def f(node):
"""
Reshape(Reshape(shape1),shape2) -> Reshape(shape2)
"""
if not opt.check_chain(node, op, op):
return False
# TODO: this can permit a failing program to run by eliminating
# the lower reshape
rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
# Copy over stacktrace from previous output node, as any error
# in new computational graph would have been caused by last op
# in the old computational graph.
copy_stack_trace(node.outputs, rval)
# It might happen that the desired output of this node has a
# broadcastable pattern that does not match that of 'rval'. This is
# when originally, we were able to figure out that one of the
# dimensions of the reshape is one, but some other transformation
# replaced the shape by one for which this cannot be guessed.
# We should try to figure out why we lost the information about this
# constant value... but in the meantime, better not apply this
# optimization.
if rval.broadcastable == node.outputs[0].broadcastable:
return [rval]
else:
return False
return f
register_canonicalize(local_reshape_chain(Reshape), name="local_reshape_chain")
@register_useless
@register_canonicalize
@register_stabilize
@local_optimizer([Reshape])
def local_useless_reshape(node):
"""
Remove two kinds of useless reshape.
Remove Reshape when both the input and output have a single dimension.
Remove Reshape when reshaping to the shape of the input.
"""
op = node.op
if not isinstance(op, Reshape):
return False
input = node.inputs[0]
output = node.outputs[0]
output_shape = node.inputs[1]
if input.ndim != output.ndim:
return False
# Simple case: both input and output have a single dimension.
# This could hide errors if the user provides inconsistent shapes.
if (
input.ndim == 1
and output.ndim == 1
and input.broadcastable == output.broadcastable
):
return [input]
# Second case: all the shapes match the input shape
# Match Reshape(x, x.shape)
if output_shape.owner and isinstance(output_shape.owner.op, Shape):
shape_input = output_shape.owner.inputs[0]
if shape_input == input:
return [input]
# Match Reshape(x, [x.shape[0], ..., x.shape[-1]]), accounting for
# broadcastable and constant dimensions
if output_shape.owner and isinstance(output_shape.owner.op, MakeVector):
output_shape_is = output_shape.owner.inputs
if not hasattr(node, "fgraph"):
shape_feature = None
else:
shape_feature = getattr(node.fgraph, "shape_feature", None)
nb_m1 = 0
shape_match = [False] * input.ndim
for dim in range(input.ndim):
outshp_i = output_shape_is[dim]
# Match Shape_i{dim}(input)
if (
outshp_i.owner
and isinstance(outshp_i.owner.op, Shape_i)
and outshp_i.owner.op.i == dim
and outshp_i.owner.inputs[0] == input
):
shape_match[dim] = True
continue
# Match Shape(input)[dim]
if (
outshp_i.owner
and isinstance(outshp_i.owner.op, Subtensor)
and len(outshp_i.owner.inputs) == 2
and extract_constant(outshp_i.owner.inputs[1]) == dim
):
subtensor_inp = outshp_i.owner.inputs[0]
if subtensor_inp.owner and isinstance(subtensor_inp.owner.op, Shape):
shape_input_i = subtensor_inp.owner.inputs[0]
if shape_input_i == input:
shape_match[dim] = True
continue
# Match 1 if input.broadcastable[dim] is True
cst_outshp_i = extract_constant(outshp_i, only_process_constants=1)
if input.broadcastable[dim] and cst_outshp_i == 1:
shape_match[dim] = True
continue
# Match -1
if cst_outshp_i == -1:
shape_match[dim] = True
nb_m1 += 1
continue
# Match shape_of[input][dim] or its constant equivalent
if shape_feature:
inpshp_i = shape_feature.get_shape(input, dim)
if inpshp_i == outshp_i or (
extract_constant(inpshp_i, only_process_constants=1)
== extract_constant(outshp_i, only_process_constants=1)
):
shape_match[dim] = True
continue
if all(shape_match) and nb_m1 <= 1:
return [input]
# TODO later: if all the shapes except one match, we may want to
# consider it useless as well, like we do in the 1-dim case.
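# Illustrative sketch (editor's addition, not part of the original module):
# the "reshape to the input's own shape" case removed by
# `local_useless_reshape`.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x = tt.matrix("x")
#   >>> y = x.reshape(x.shape)
#   >>> f = theano.function([x], y)
#   >>> # The Reshape node is removed and the optimized graph returns `x`
#   >>> # (the same holds for x.reshape((x.shape[0], x.shape[1]))).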
@register_canonicalize
@local_optimizer([Reshape])
def local_reshape_to_dimshuffle(node):
"""
Broadcastable dimensions in Reshape are replaced with dimshuffle.
The goal is to avoid using reshape to add or remove broadcastable
dimensions, but use dimshuffle instead, so dimshuffles can cancel out
or be removed later on.
For example:
    - reshape(x, (1, n)) --> dimshuffle{x,0}(reshape(x, (n,)))
- reshape(x, (1, m, 1, n, 1, 1))
--> dimshuffle{x,0,x,1,x,x}(reshape(x, (m, n)))
"""
op = node.op
if not isinstance(op, Reshape):
return False
input = node.inputs[0]
output = node.outputs[0]
output_shape = node.inputs[1]
dimshuffle_new_order = []
new_output_shape = []
index = 0 # index over the output of the new reshape
for i in range(output.ndim):
# Since output_shape is a symbolic vector, we trust extract_constant
# to go through however it is formed to see if its i-th element is 1.
# We need only_process_constants=False for that.
dim = extract_constant(
output_shape[i], only_process_constants=False, elemwise=False
)
if dim == 1:
dimshuffle_new_order.append("x")
else:
dimshuffle_new_order.append(index)
new_output_shape.append(dim)
index = index + 1
if index != output.ndim:
inner = op.__class__(len(new_output_shape))(input, new_output_shape)
copy_stack_trace(output, inner)
new_node = [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]
copy_stack_trace(output, new_node)
return new_node
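# Illustrative sketch (editor's addition, not part of the original module):
# `local_reshape_to_dimshuffle` splitting a reshape that only adds a
# broadcastable dimension, as described in the docstring above.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x = tt.vector("x")
#   >>> y = x.reshape((1, x.shape[0]))
#   >>> f = theano.function([x], y)
#   >>> # The graph becomes dimshuffle{x,0}(reshape(x, (n,))), so the added
#   >>> # broadcastable dimension can later cancel against other dimshuffles.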
@register_canonicalize
@register_stabilize
@local_optimizer([Reshape])
def local_reshape_lift(node):
"""
Reshape(UnaryElemwise(x)) -> UnaryElemwise(Reshape(x))
This optimization is needed by optimization
nnet/sigm.py:log1msigm_to_softplus to get applied when there is a reshape.
"""
if (
isinstance(node.op, Reshape)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Elemwise)
and len(node.inputs[0].owner.inputs) == 1
):
r = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
# Copy stacktrace from previous Reshape op, as an error in new
# Reshape op could only have been caused by old one.
copy_stack_trace(node.outputs, r)
e = node.inputs[0].owner.op(r)
        # Copy stacktrace from both the previous Reshape and UnaryElemwise op
        # because an error in the new graph could have been caused by either op.
copy_stack_trace(node.outputs + node.inputs, e)
# In rare case the original broadcast was (False, True), but
# the new one is (False, False). So don't crash in that case.
if e.type != node.outputs[0].type:
re = tt.patternbroadcast(e, node.outputs[0].broadcastable)
# Copy over stack trace.
            # If the graph fails, it is usually because a dimension
            # that should be broadcastable does not actually have length 1.
copy_stack_trace(e, re)
else:
re = e
return [re]
##################
# Middleman cuts #
##################
register_canonicalize(OpRemove(tensor_copy), name="remove_tensor_copy")
################
# Canonization #
################
class Canonizer(LocalOptimizer):
r"""
Simplification tool. The variable is a local_optimizer. It is best used
with a TopoOptimizer in in_to_out order.
Usage: Canonizer(main, inverse, reciprocal, calculate)
Parameters
----------
main
A suitable Op class that is commutative, associative and
takes one to an arbitrary number of inputs, e.g. add or
mul
inverse
An Op class such that inverse(main(x, y), y) == x
e.g. sub or true_div
reciprocal
A function such that main(x, reciprocal(y)) == inverse(x, y)
e.g. neg or inv
calculate
Function that takes a list of numpy.ndarray instances
for the numerator, another list for the denumerator,
and calculates inverse(main(\*num), main(\*denum)). It
takes a keyword argument, aslist. If True, the value
should be returned as a list of one element, unless
the value is such that value = main(). In that case,
the return value should be an empty list.
Examples
--------
>>> import theano.tensor as tt
>>> from theano.tensor.opt import Canonizer
>>> add_canonizer = Canonizer(add, sub, neg, \\
... lambda n, d: sum(n) - sum(d))
>>> mul_canonizer = Canonizer(mul, true_div, inv, \\
... lambda n, d: prod(n) / prod(d))
Examples of optimizations mul_canonizer can perform:
| x / x -> 1
| (x * y) / x -> y
| x / y / x -> 1 / y
| x / y / z -> x / (y * z)
| x / (y / z) -> (x * z) / y
| (a / b) * (b / c) * (c / d) -> a / d
| (2.0 * x) / (4.0 * y) -> (0.5 * x) / y
| 2 * x / 2 -> x
| x * y * z -> Elemwise(mul){x,y,z} #only one pass over the memory.
| !-> Elemwise(mul){x,Elemwise(mul){y,z}}
"""
def __init__(self, main, inverse, reciprocal, calculate, use_reciprocal=True):
self.main = main
self.inverse = inverse
self.reciprocal = reciprocal
self.calculate = calculate
self.use_reciprocal = use_reciprocal
self.external_simplifiers = []
def add_simplifier(self, simplifier, reason):
self.external_simplifiers.append((reason, simplifier))
def tracks(self):
return [self.main, self.inverse, self.reciprocal]
def get_num_denum(self, input):
r"""
This extract two lists, num and denum, such that the input is:
self.inverse(self.main(\*num), self.main(\*denum)). It returns
the two lists in a (num, denum) pair.
For example, for main, inverse and reciprocal = \*, / and inv(),
| input -> returned value (num, denum)
| x*y -> ([x, y], [])
| inv(x) -> ([], [x])
| inv(x) * inv(y) -> ([], [x, y])
| x*y/z -> ([x, y], [z])
| log(x) / y * (z + x) / y -> ([log(x), z + x], [y, y])
| (((a / b) * c) / d) -> ([a, c], [b, d])
| a / (b / c) -> ([a, c], [b])
| log(x) -> ([log(x)], [])
| x**y -> ([x**y], [])
| x * y * z -> ([x, y, z], [])
"""
# This function is recursive. The idea is that there is a
# get_num_denum recursion in which the internal ops are all
# one of (main, inverse, reciprocal, DimShuffle) and the
# internal data nodes all have the dtype of the 'input'
# argument. The leaf-Variables of the graph covered by the
# recursion may be of any Variable type.
if input.owner is None or input.owner.op not in [
self.main,
self.inverse,
self.reciprocal,
]:
if input.owner and isinstance(input.owner.op, tt.DimShuffle):
# If input is a DimShuffle of some input which does
# something like this:
# * change a vector of length N into a 1xN row matrix
# * change a scalar into a 1x1x1 tensor
# * in general, complete the shape of a tensor
# with broadcastable 1s to the *left*
# Then we will simply discard the DimShuffle and return
# the num/denum of its input
dsn = input.owner # dimshuffle node
dsop = dsn.op # dimshuffle op
# the first input of the dimshuffle i.e. the ndarray to redim
dsi0 = dsn.inputs[0]
# The compatible order is a DimShuffle "new_order" of the form:
# ('x', ..., 'x', 0, 1, 2, ..., dimshuffle_input.type.ndim)
# That kind of DimShuffle only adds broadcastable
# dimensions on the left, without discarding any
# existing broadcastable dimension and is inserted
# automatically by Elemwise when the inputs have
# different numbers of dimensions (hence why we can
# discard its information - we know we can retrieve it
# later on).
compatible_order = ("x",) * (input.type.ndim - dsi0.type.ndim) + tuple(
range(dsi0.type.ndim)
)
if dsop.new_order == compatible_order:
# If the "new_order" is the one we recognize,
# we return the num_denum of the dimshuffled input.
return self.get_num_denum(input.owner.inputs[0])
else:
# This is when the input isn't produced by main,
# inverse or reciprocal.
return [input], []
else:
return [input], []
num = []
denum = []
parent = input.owner
# We get the (num, denum) pairs for each input
# pairs = [self.get_num_denum(input2) if input2.type.dtype ==
# input.type.dtype else ([input2], []) for input2 in
# parent.inputs]
pairs = [self.get_num_denum(input2) for input2 in parent.inputs]
if parent.op == self.main:
# If we have main(x, y, ...), numx, denumx, numy, denumy, ...
# then num is concat(numx, numy, num...) and denum is
# concat(denumx, denumy, denum...) note that main() can have any
# number of arguments >= 0 concat is list concatenation
num = reduce(list.__iadd__, map(operator.itemgetter(0), pairs))
denum = reduce(list.__iadd__, map(operator.itemgetter(1), pairs))
elif parent.op == self.inverse:
# If we have inverse(x, y), numx, denumx, numy and denumy
# then num is concat(numx, denumy) and denum is
# concat(denumx, numy) note that inverse() is binary
num = pairs[0][0] + pairs[1][1]
denum = pairs[0][1] + pairs[1][0]
elif parent.op == self.reciprocal:
# If we have reciprocal(x), numx, denumx
# then num is denumx and denum is numx
# note that reciprocal() is unary
num = pairs[0][1]
denum = pairs[0][0]
return num, denum
def merge_num_denum(self, num, denum):
r"""
Utility function which takes two lists, num and denum, and
returns something which is equivalent to inverse(main(\*num),
main(\*denum)), but depends on the length of num and the length
of denum (in order to minimize the number of operations).
Let n = len(num) and d = len(denum):
| n=0, d=0: neutral element (given by self.calculate([], []))
| (for example, this would be 0 if main is addition
| and 1 if main is multiplication)
| n=1, d=0: num[0]
| n=0, d=1: reciprocal(denum[0])
| n=1, d=1: inverse(num[0], denum[0])
| n=0, d>1: reciprocal(main(\*denum))
| n>1, d=0: main(\*num)
| n=1, d>1: inverse(num[0], main(\*denum))
| n>1, d=1: inverse(main(\*num), denum[0])
| n>1, d>1: inverse(main(\*num), main(\*denum))
Given the values of n and d to which they are associated, all
of the above are equivalent to:
inverse(main(\*num), main(\*denum))
"""
ln, ld = len(num), len(denum)
if not ln and not ld:
return tt.as_tensor_variable(self.calculate([], []))
if not ln:
if self.use_reciprocal:
return self.reciprocal(self.merge_num_denum(denum, []))
else:
ln = [self.calculate([], [], aslist=False)]
if not ld:
if ln == 1:
# num[0] should always be a variable
assert isinstance(num[0], gof.Variable)
return num[0]
else:
return self.main(*num)
return self.inverse(
self.merge_num_denum(num, []), self.merge_num_denum(denum, [])
)
@staticmethod
def get_constant(v):
"""
Returns
-------
object
A numeric constant if v is a Constant or, well, a
numeric constant. If v is a plain Variable, returns None.
"""
if isinstance(v, Constant):
if getattr(v.tag, "unique_value", None) is not None:
data = v.tag.unique_value
else:
data = v.data
if data.ndim == 0:
return data
else:
return None
elif isinstance(v, Variable):
return None
else:
return v
def simplify(self, num, denum, out_type):
"""
Shorthand for:
.. code-block:: python
self.simplify_constants(*self.simplify_factors(num, denum))
"""
rval = self.simplify_constants(
*self.simplify_factors(num, denum), out_type=out_type
)
for reason, simplifier in self.external_simplifiers:
# TODO: document that 'reason' is associated with this
# simplification to help auditing when things go
# wrong
rval = simplifier(*rval)
return rval
def simplify_factors(self, num, denum):
"""
For any Variable r which is both in num and denum, removes it
from both lists. Modifies the lists inplace. Returns the
modified lists. For example:
| [x], [x] -> [], []
| [x, y], [x] -> [y], []
| [a, b], [c, d] -> [a, b], [c, d]
"""
ln = len(num)
ld = len(denum)
if ld > 2 and ln > 2:
# Faster version for "big" inputs.
while True:
s = set(num)
# Inputs can appear multiple times
redo = len(s) != len(num)
inter = s.intersection(denum)
for v in inter:
num.remove(v)
denum.remove(v)
if not redo or not inter:
break
else:
for v in list(num):
if v in denum:
num.remove(v)
denum.remove(v)
return num, denum
def simplify_constants(self, orig_num, orig_denum, out_type=None):
"""
Find all constants and put them together into a single constant.
Finds all constants in orig_num and orig_denum (using
get_constant) and puts them together into a single
constant. The constant is inserted as the first element of the
numerator. If the constant is the neutral element, it is
removed from the numerator.
Examples
--------
Let main be multiplication:
| [2, 3, x], [] -> [6, x], []
| [x, y, 2], [4, z] -> [0.5, x, y], [z]
| [x, 2, y], [z, 2] -> [x, y], [z]
"""
# Lists representing the numerator and denumerator
num, denum = [], []
# Lists representing the *constant* elements of num and denum
numct, denumct = [], []
for v in orig_num:
ct = self.get_constant(v)
if ct is not None:
# We found a constant in the numerator!
# We add it to numct
numct.append(ct)
else:
num.append(v)
for v in orig_denum:
ct = self.get_constant(v)
if ct is not None:
denumct.append(ct)
else:
denum.append(v)
if self.use_reciprocal or num:
# This will calculate either:
# [inverse(main(*numct), main(*denumct))]
# [] - if inverse(main(*numct), main(*denumct)) is the
# neutral element
ct = self.calculate(numct, denumct, aslist=True, out_type=out_type)
else:
# This happens if we don't allow the reciprocal and the
# numerator is empty. That means we will need to represent
# reciprocal(x) like inverse(neutral_element, x) so
# we can't allow ct == []
# TODO: why is this branch needed when merge_num_denum
# does it for us?
ct = [self.calculate(numct, denumct, aslist=False, out_type=out_type)]
# Wrapping ct in a Constant with the right dtype
ct = [tt.constant(c, dtype=out_type.dtype) for c in ct]
if orig_num and len(numct) == 1 and len(denumct) == 0 and ct:
# In that case we should only have one constant in `ct`.
assert len(ct) == 1
first_num_ct = self.get_constant(orig_num[0])
if first_num_ct is not None and ct[0].type.values_eq(
ct[0].data, first_num_ct
):
# This is an important trick :( if it so happens that:
# * there's exactly one constant on the numerator and none on
# the denominator
# * it's not the neutral element (ct is an empty list in that
# case)
# * the constant is the same as the first argument in the
# numerator (we only check the first argument because the
# canonizer puts the computed constants first)
# -> then we return very exactly the original num/denum.
# If we don't do that the optimizer will just loop
# infinitely because it will not catch on that there are
# no changes to be made and every time it will want to
# replace something by the same thing...
# Note that it is important to use `values_eq` instead of
# the == operator, to handle NaN values correctly.
return orig_num, orig_denum
return ct + num, denum
def transform(self, node):
op = node.op
if op not in [self.main, self.inverse, self.reciprocal]:
return False
assert len(node.outputs) == 1
out = node.outputs[0]
        # out won't have a clients field when an in-progress change to
        # the graph has not been committed yet. We can't do the check in
        # that case, so we skip this node; the optimization should be
        # reapplied later.
if not hasattr(out, "clients"):
return
# check if any of the clients of this node would be part of
# this canonized graph... if so, we do nothing and wait for
# them to be transformed.
for c, c_idx in out.clients:
if c == "output":
continue
while (
isinstance(getattr(c, "op", None), DimShuffle)
and len(c.outputs[0].clients) <= 1
):
c = c.outputs[0].clients[0][0]
if getattr(c, "op", "") in [self.main, self.inverse, self.reciprocal]:
return False
# Here we make the canonical version of the graph around this node
# See the documentation of get_num_denum and simplify
orig_num, orig_denum = self.get_num_denum(node.outputs[0])
num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)
def same(x, y):
return len(x) == len(y) and all(np.all(xe == ye) for xe, ye in zip(x, y))
if (
same(orig_num, num)
and same(orig_denum, denum)
and
# Check to see if we've collapsed some nested ops.
not (
len(orig_denum) == 0
and
# Make sure this change would increase the number of vector
# arguments--decreasing the number of unnecessary `self.main`
# nodes.
len(node.inputs) < len(orig_num)
)
and
# Do a similar check for the reciprocal op.
not (
self.use_reciprocal
and node.op == self.reciprocal
and len(orig_num) == 0
and node.inputs[0].owner
and len(node.inputs[0].owner.inputs) < len(orig_denum)
)
):
return False
new = self.merge_num_denum(num, denum)
if new.type.dtype != out.type.dtype:
new = tt.cast(new, out.type.dtype)
assert (new.type == out.type) == (not (new.type != out.type))
if not (new.type == out.type):
new = _fill_chain(new, node.inputs)[0]
if new.type == out.type:
            # This happens with the test
            # theano/tensor/tests/test_opt.py:T_local_switch_sink
new.tag.values_eq_approx = values_eq_approx_remove_inf_nan
# We need to implement the copy over of the stacktrace.
# See issue #5104.
return [new]
else:
_logger.warning(
" ".join(
(
"CANONIZE FAILED: new, out = ",
new,
",",
out,
"types",
new.type,
",",
out.type,
)
)
)
return False
def __str__(self):
return getattr(
self,
"name",
"Canonizer({}, {}, {})".format(self.main, self.inverse, self.reciprocal),
)
def mul_calculate(num, denum, aslist=False, out_type=None):
if not num and not denum:
        # Return 1 in the smallest dtype possible.
if aslist:
return []
else:
return np.int8(1)
# Make sure we do not accidentally upcast data types.
if out_type is None:
out_dtype = ts.upcast(*[v.dtype for v in (num + denum)])
else:
out_dtype = out_type.dtype
one = theano._asarray(1, dtype=out_dtype)
v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)
if aslist:
if np.all(v == 1):
return []
else:
return [v]
return v
local_mul_canonizer = Canonizer(mul, true_div, inv, mul_calculate, False)
register_canonicalize(local_mul_canonizer, name="local_mul_canonizer")
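# Illustrative sketch (editor's addition, not part of the original module):
# simplifications performed by `local_mul_canonizer` once registered, taken
# from the patterns listed in the Canonizer docstring.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x, y, z = tt.vector("x"), tt.vector("y"), tt.vector("z")
#   >>> f = theano.function([x, y], (x * y) / x)       # canonicalizes to y
#   >>> g = theano.function([x, y, z], x / y / z)      # becomes x / (y * z)
#   >>> # Inspect f.maker.fgraph / g.maker.fgraph to see the canonical forms.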
@local_optimizer([neg])
def local_neg_to_mul(node):
if node.op == neg:
return [mul(np.array(-1, dtype=node.inputs[0].dtype), node.inputs[0])]
register_canonicalize(local_neg_to_mul)
@register_specialize
@local_optimizer([Sum, Prod])
def local_sum_prod_mul_by_scalar(node):
"""
sum(scalar * smth) -> scalar * sum(smth)
sum(-smth) -> -sum(smth)
or
prod(scalar * smth) -> scalar ** size(smth) * prod(smth)
prod(-smth) -> -1 ** size(smth) * prod(smth)
"""
    # TODO: if the thing inside the Sum is a division,
# we should get at the numerator....
if isinstance(node.op, (Sum, Prod)):
(node_inps,) = node.inputs
if node_inps.owner and node_inps.owner.op == mul:
terms = node_inps.owner.inputs
scalars = [t.dimshuffle() for t in terms if np.all(t.type.broadcastable)]
if len(scalars) == 0:
# Nothing to optimize here
return
non_scalars = [t for t in terms if not np.all(t.broadcastable)]
# Perform the op only on the non-scalar inputs, if applicable
if len(non_scalars) == 0:
new_op_input_nb_elements = 1
new_op_output = 1
elif len(non_scalars) == 1:
new_op_input_nb_elements = non_scalars[0].size
new_op_output = node.op(non_scalars[0])
else:
new_op_input = mul(*non_scalars)
# We assume that errors always come from the prod/mul op in the
# original computational graph, and therefore need to only
# copy over its output stacktrace.
copy_stack_trace(node.outputs, new_op_input)
new_op_input_nb_elements = new_op_input.size
new_op_output = node.op(new_op_input)
if not len(non_scalars) == 0:
# Copy over stacktrace from previous output to new mul op,
# for same reason as above.
copy_stack_trace(node.outputs, new_op_output)
# If node.op is a T.elemwise.Prod, then the scalars need to be
# raised to the power of the number of elements in the input
# to the Prod
if isinstance(node.op, Prod) and new_op_input_nb_elements != 1:
scalars = [s ** new_op_input_nb_elements for s in scalars]
# Scale the output of the op by the scalars and return as
# replacement for the original output
mul_inputs = scalars
if new_op_input_nb_elements != 1:
mul_inputs.append(new_op_output)
if len(mul_inputs) == 1:
# Copy over stacktrace from previous output to new mul op,
# for same reason as above.
copy_stack_trace(node.outputs, mul_inputs)
return mul_inputs
else:
ret = mul(*mul_inputs)
# Copy over stacktrace from previous output to new mul op,
# for same reason as above.
copy_stack_trace(node.outputs, [ret] + mul_inputs)
return [ret]
if isinstance(node.op, Sum) and node_inps.owner and node_inps.owner.op == neg:
s = node.op(node_inps.owner.inputs[0])
ret = neg(s)
# There are never errors in the negative op, thus
# we need only to copy over stacktrace from previous output node to
# the two new ops.
copy_stack_trace(node.outputs, [s, ret])
return [ret]
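# Illustrative sketch (editor's addition, not part of the original module):
# `local_sum_prod_mul_by_scalar` pulling a scalar factor out of a sum.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> a = tt.scalar("a")
#   >>> x = tt.matrix("x")
#   >>> f = theano.function([a, x], (a * x).sum())
#   >>> # The optimized graph computes a * x.sum() rather than summing the
#   >>> # elementwise product; for prod() the scalar is raised to x.size.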
@register_specialize
@local_optimizer([Elemwise])
def local_elemwise_sub_zeros(node):
"""
Elemwise{sub}(X,X) -> zeros_like(X)
"""
if (
isinstance(node.op, Elemwise)
and node.op.scalar_op.nin == 2
and node.op.scalar_op == ts.sub
and node.inputs[0] == node.inputs[1]
):
res = tt.zeros_like(node.inputs[0])
# Copy over stacktrace from previous output.
# This could help for failures due to out-of-memory.
copy_stack_trace(node.outputs, res)
return [res]
@register_useless
@register_specialize
@register_stabilize
@register_canonicalize
@local_optimizer([Elemwise])
def local_useless_elemwise_comparison(node):
"""...
:note: These cases appear in the graph generated by scan.
These optimizations will make the graph easier to read.
# Comparing to itself is constant
Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)
Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)
Elemwise[{minimum,maximum}](X, X) -> X
# Comparing shape to 0 can be constant
Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)
Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)
Elemwise[maximum](X.shape[i], 0) -> X.shape[i]
Elemwise[maximum](0, X.shape[i]) -> X.shape[i]
Elemwise[minimum](X.shape[i], 0) -> 0
Elemwise[minimum](0, X.shape[i]) -> 0
# The shape can be replaced with sum of shapes
Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)
Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)
# Shapes are never negative
# Needed by Reshape.infer_shape
Elemwise[EQ](Subtensor(Shape(x)), -N) -> Elemwise[zeros](X)
"""
if not isinstance(node.op, Elemwise):
return
if node.op.scalar_op.nin != 2:
return
    # We call zeros_like and ones_like with opt=True to generate a
    # cleaner graph.
dtype = node.outputs[0].dtype
# Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)
if (
isinstance(node.op.scalar_op, (ts.LT, ts.GT))
and node.inputs[0] is node.inputs[1]
):
res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)
if (
isinstance(node.op.scalar_op, (ts.LE, ts.GE))
and node.inputs[0] is node.inputs[1]
):
res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[{minimum,maximum}](X, X) -> X
if (
isinstance(node.op.scalar_op, (ts.Minimum, ts.Maximum))
and node.inputs[0] is node.inputs[1]
):
res = node.inputs[0]
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)
if (
isinstance(node.op.scalar_op, ts.LT)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Shape_i)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)
if (
isinstance(node.op.scalar_op, ts.GE)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Shape_i)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[maximum](X.shape[i], 0) -> X.shape[i]
if (
isinstance(node.op.scalar_op, ts.Maximum)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Shape_i)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
# No need to copy over stacktrace.
return [node.inputs[0]]
# Elemwise[maximum](0, X.shape[i]) -> X.shape[i]
if (
isinstance(node.op.scalar_op, ts.Maximum)
and tt.extract_constant(node.inputs[0], only_process_constants=True) == 0
and node.inputs[1].owner
and isinstance(node.inputs[1].owner.op, Shape_i)
):
# No need to copy over stacktrace.
return [node.inputs[1]]
# Elemwise[minimum](X.shape[i], 0) -> 0
if (
isinstance(node.op.scalar_op, ts.Minimum)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Shape_i)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[minimum](0, X.shape[i]) -> 0
if (
isinstance(node.op.scalar_op, ts.Minimum)
and tt.extract_constant(node.inputs[0], only_process_constants=True) == 0
and node.inputs[1].owner
and isinstance(node.inputs[1].owner.op, Shape_i)
):
res = tt.zeros_like(node.inputs[1], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)
if (
isinstance(node.op.scalar_op, ts.LT)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Elemwise)
and isinstance(node.inputs[0].owner.op.scalar_op, ts.Add)
and all(
[
isinstance(var.owner and var.owner.op, Shape_i)
for var in node.inputs[0].owner.inputs
]
)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)
if (
isinstance(node.op.scalar_op, ts.GE)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Elemwise)
and isinstance(node.inputs[0].owner.op.scalar_op, ts.Add)
and all(
[
isinstance(var.owner and var.owner.op, Shape_i)
for var in node.inputs[0].owner.inputs
]
)
and tt.extract_constant(node.inputs[1], only_process_constants=True) == 0
):
res = tt.ones_like(node.inputs[0], dtype=dtype, opt=True)
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
# Elemwise[EQ](Subtensor(Shape(x)), -N)
# Elemwise[EQ](somegraph that only depend of shape, -N)
# TODO: handle the case where the -N is on either side
"""
|Elemwise{eq,no_inplace} [id B] ''
| |Subtensor{int64} [id C] ''
| | |Join [id D] ''
| | | |TensorConstant{0} [id E]
| | | |Subtensor{int64:int64:} [id F] ''
| | | | |Shape [id G] ''
"""
    def investigate(node):
        """Return True if the values will be shapes, hence >= 0."""
if isinstance(node.op, (Shape, Shape_i)):
return True
elif isinstance(node.op, Subtensor) and node.inputs[0].owner:
return investigate(node.inputs[0].owner)
elif isinstance(node.op, Join):
return all(v.owner and investigate(v.owner) for v in node.inputs[1:])
elif isinstance(node.op, MakeVector):
return all(v.owner and investigate(v.owner) for v in node.inputs)
if (
isinstance(node.op.scalar_op, ts.EQ)
and node.inputs[0].owner
and investigate(node.inputs[0].owner)
):
try:
cst = get_scalar_constant_value(node.inputs[1], only_process_constants=True)
res = tt.zeros_like(node.inputs[0], dtype=dtype, opt=True)
if cst < 0:
# Copy over stacktrace from previous output.
copy_stack_trace(node.outputs, res)
return [res]
except NotScalarConstantError:
pass
return
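# Illustrative sketch (editor's addition, not part of the original module):
# one of the shape comparisons that `local_useless_elemwise_comparison`
# turns into a constant (shapes are never negative).
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> x = tt.vector("x")
#   >>> f = theano.function([x], tt.lt(x.shape[0], 0))
#   >>> # Once x.shape[0] has been rewritten to Shape_i{0}(x), the comparison
#   >>> # is replaced by a constant-zero graph, so nothing is compared at
#   >>> # run time.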
@register_canonicalize
@register_specialize
@local_optimizer([Sum, Prod])
def local_sum_prod_div_dimshuffle(node):
"""
sum(a / dimshuffle{...}(b), axis=l) -> sum(a, axis={...}) / b,
if dimension l of the DimShuffle is 'x'
or
prod(a / dimshuffle{...}(b), axis=l) ->
prod(a, axis={...}) / b ** a.shape[l],
if dimension l of the DimShuffle is 'x'
"""
# It does not make much sense now to extend it to the case where the
# dimshuffle is in the numerator, since elemwise inversion of the
    # denominator would still be needed before the summation or product.
if isinstance(node.op, (Sum, Prod)):
axis = node.op.axis
if axis is None:
axis = list(range(node.inputs[0].ndim))
node_input = node.inputs[0]
if node_input.owner and node_input.owner.op == true_div:
numerator, denominator = node_input.owner.inputs
# Old, bugged logic, reproduced here only to warn users
if (
config.warn.sum_div_dimshuffle_bug
and isinstance(node.op, Sum)
and numerator.owner
and isinstance(numerator.owner.op, tt.DimShuffle)
):
# Check compatibility
new_order = numerator.owner.op.new_order
compatible_dims = True
for ax in axis:
if len(new_order) <= ax or new_order[ax] != "x":
compatible_dims = False
break
if compatible_dims:
_logger.warning(
"WARNING: Your current code is fine, but"
" Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
" cfc6322e5ad4 (2010-08-03) would "
"have given an incorrect result. "
"To disable this warning, set the Theano"
" flag warn.sum_div_dimshuffle_bug to"
" False."
)
if denominator.owner and isinstance(denominator.owner.op, tt.DimShuffle):
dimshuffle_input = denominator.owner.inputs[0]
dimshuffle_order = denominator.owner.op.new_order
compatible_dims = []
incompatible_dims = []
for ax in axis:
if ax < len(dimshuffle_order) and dimshuffle_order[ax] == "x":
compatible_dims.append(ax)
else:
incompatible_dims.append(ax)
reordered_incompatible_dims = []
for ic_ax in incompatible_dims:
reordered_incompatible_dims.append(
ic_ax - sum([1 for c_ax in compatible_dims if c_ax < ic_ax])
)
if len(compatible_dims) > 0:
optimized_dimshuffle_order = list(
ax
for i, ax in enumerate(dimshuffle_order)
if (i not in axis) or (ax != "x")
)
# Removing leading 'x' (since it will be done automatically)
while (
len(optimized_dimshuffle_order) > 0
and optimized_dimshuffle_order[0] == "x"
):
del optimized_dimshuffle_order[0]
                    # If optimized_dimshuffle_order is sorted and contains
                    # no 'x', then the dimshuffle is useless.
if all(i == e for i, e in enumerate(optimized_dimshuffle_order)):
optimized_dimshuffle = dimshuffle_input
else:
optimized_dimshuffle = tt.DimShuffle(
dimshuffle_input.type.broadcastable,
optimized_dimshuffle_order,
)(dimshuffle_input)
if config.warn.sum_div_dimshuffle_bug and isinstance(
node.op, Sum
):
_logger.warning(
"WARNING: Your current code is fine,"
" but Theano versions between "
"rev. 3bd9b789f5e8 (2010-06-16) and"
" cfc6322e5ad4 (2010-08-03) would "
"have given an incorrect result. "
"To disable this warning, set the"
" Theano flag "
"warn.sum_div_dimshuffle_bug"
" to False."
)
if isinstance(node.op, Sum):
op_on_compatible_dims = tt.sum(numerator, axis=compatible_dims)
rval = true_div(op_on_compatible_dims, optimized_dimshuffle)
if len(reordered_incompatible_dims) > 0:
rval = tt.sum(rval, axis=reordered_incompatible_dims)
elif isinstance(node.op, Prod):
op_on_compatible_dims = tt.prod(numerator, axis=compatible_dims)
dtype = numerator.dtype
rval = true_div(
op_on_compatible_dims,
(
optimized_dimshuffle
** tt.prod(
[
numerator.shape[ax].astype(dtype)
for ax in compatible_dims
]
)
),
)
if len(reordered_incompatible_dims) > 0:
rval = tt.prod(rval, axis=reordered_incompatible_dims)
return [rval]
@register_canonicalize
@local_optimizer([Sum, Prod])
def local_sum_prod_all_to_none(node):
"""
Sum{0,1,...N} -> Sum{} or
Prod{0,1,...N} -> Prod{}
"""
if isinstance(node.op, Sum) or isinstance(node.op, Prod):
opt_type = Sum if isinstance(node.op, Sum) else Prod
# if all the axes are named, then use None as a shorthand
# this permits more merging
if node.op.axis is None:
return
if set(node.op.axis) == set(range(node.inputs[0].type.ndim)):
return [opt_type(axis=None, dtype=node.op.dtype)(node.inputs[0])]
@register_canonicalize
@local_optimizer([Sum, Prod])
def local_op_of_op(node):
"""
Prod(Prod()) -> single Prod()
or
Sum(Sum()) -> single Sum()
"""
if isinstance(node.op, Prod) or isinstance(node.op, Sum):
opt_type = Sum if isinstance(node.op, Sum) else Prod
(node_inps,) = node.inputs
out_dtype = node.op.dtype
        # We manipulate the graph, so make sure the intermediate result
        # is not used elsewhere; otherwise the opt would affect other
        # computations.
if len(node_inps.clients) == 1:
if node_inps.owner and (isinstance(node_inps.owner.op, node.op.__class__)):
# check to see either the inner or outer prod is doing a
# product over all axis, in which case we can remove it
if node_inps.owner.op.axis is None or node.op.axis is None:
return [opt_type(None, dtype=out_dtype)(node_inps.owner.inputs[0])]
# figure out which axes were in the original sum
newaxis = list(tuple(node_inps.owner.op.axis))
for i in node.op.axis:
new_i = i
for ii in node_inps.owner.op.axis:
if new_i >= ii:
new_i += 1
assert new_i not in newaxis
newaxis.append(new_i)
assert len(newaxis) == len(
list(node_inps.owner.op.axis) + list(node.op.axis)
)
# The old bugged logic. We keep it there to generate a warning
# when we generated bad code.
alldims = list(range(node_inps.owner.inputs[0].type.ndim))
alldims = [
d for i, d in enumerate(alldims) if i in node_inps.owner.op.axis
]
alldims = [d for i, d in enumerate(alldims) if i in node.op.axis]
newaxis_old = [
i
for i in range(node_inps.owner.inputs[0].type.ndim)
if i not in alldims
]
if (
theano.config.warn.sum_sum_bug
and newaxis != newaxis_old
and len(newaxis) == len(newaxis_old)
):
_logger.warning(
"WARNING (YOUR CURRENT CODE IS FINE): Theano "
"versions between version 9923a40c7b7a and August "
"2nd, 2010 generated bugged code in this case. "
"This happens when there are two consecutive sums "
"in the graph and the intermediate sum is not "
"used elsewhere in the code. Some safeguard "
"removed some bad code, but not in all cases. You "
"are in one such case. To disable this warning "
"(that you can safely ignore since this bug has "
"been fixed) set the theano flag "
"`warn.sum_sum_bug` to False."
)
combined = opt_type(newaxis, dtype=out_dtype)
return [combined(node_inps.owner.inputs[0])]
ALL_REDUCE = [
CAReduce,
All,
Any,
Sum,
Prod,
ProdWithoutZeros,
] + CAReduce.__subclasses__()
@register_canonicalize
@register_uncanonicalize # Needed for MaxAndArgmax -> CAReduce
@local_optimizer(ALL_REDUCE)
def local_reduce_join(node):
"""
Reduce{scalar.op}(Join(axis=0, a, b), axis=0) -> Elemwise{scalar.op}(a, b)
Notes
-----
    Supported scalar.op are Maximum, Minimum in some cases and Add and Mul in
    all cases.
    Currently we must reduce on axis 0. It is probably extensible to the case
    where we join and reduce on the same set of axes.
"""
if (
isinstance(node.op, tt.CAReduce)
and node.inputs[0].owner
and isinstance(node.inputs[0].owner.op, Join)
):
join = node.inputs[0].owner
if tt.extract_constant(join.inputs[0], only_process_constants=True) != 0:
return
if isinstance(node.op.scalar_op, (ts.Maximum, ts.Minimum)):
# Support only 2 inputs for now
if len(join.inputs) != 3:
return
elif not isinstance(node.op.scalar_op, (ts.Add, ts.Mul)):
return
elif len(join.inputs) <= 2:
# This is a useless join, that will get removed by another opt.
return
new_inp = []
for inp in join.inputs[1:]:
inp = inp.owner
if not inp:
return
if not isinstance(inp.op, DimShuffle) or inp.op.new_order != ("x",) + tuple(
range(inp.inputs[0].ndim)
):
return
new_inp.append(inp.inputs[0])
ret = Elemwise(node.op.scalar_op)(*new_inp)
if ret.dtype != node.outputs[0].dtype:
            # The reduction does something to the dtype.
return
reduce_axis = node.op.axis
if reduce_axis is None:
reduce_axis = tuple(range(node.inputs[0].ndim))
        # The warning is emitted late so that we do not add extra warnings.
if len(reduce_axis) != 1 or 0 not in reduce_axis:
if theano.config.warn.reduce_join:
                warnings.warn(
"Your current code is fine, but Theano versions "
"prior to 0.7 (or this development version Sept 2014) "
"might have given an incorrect result for this code. "
"To disable this warning, set the Theano flag "
"warn.reduce_join to False. The problem was an "
"optimization, that modified the pattern "
'"Reduce{scalar.op}(Join(axis=0, a, b), axis=0)", '
"did not check the reduction axis. So if the "
"reduction axis was not 0, you got a wrong answer."
)
return
        # The new check is added late so that we do not add extra warnings.
try:
join_axis = get_scalar_constant_value(
join.inputs[0], only_process_constants=True
)
if join_axis != reduce_axis[0]:
return
except NotScalarConstantError:
return
return [ret]
@register_canonicalize("fast_compile", "local_cut_useless_reduce")
@register_useless("local_cut_useless_reduce")
@local_optimizer(ALL_REDUCE)
def local_useless_reduce(node):
"""Sum(a, axis=[]) -> a """
if isinstance(node.op, tt.CAReduce):
(summed,) = node.inputs
# if reduce were doing anything, the output ndim would be reduced
if summed.type == node.outputs[0].type:
return [summed]
@register_canonicalize
@register_uncanonicalize
@register_specialize
@local_optimizer(ALL_REDUCE)
def local_reduce_broadcastable(node):
"""Remove reduction over broadcastable dimensions."""
if isinstance(node.op, tt.CAReduce):
(reduced,) = node.inputs
odtype = node.outputs[0].dtype
if node.op.axis is None:
if all(reduced.broadcastable):
return [reduced.dimshuffle().astype(odtype)]
else:
axis = list(node.op.axis)
cuttable = [a for a in axis if reduced.broadcastable[a]]
if cuttable:
# -- we can remove some axes of summation,
# which simplifies the codegen for sum, especially on GPU
new_axis = []
pattern = []
ii = 0
for p in range(reduced.ndim):
if p not in cuttable:
if p in axis:
new_axis.append(ii)
pattern.append(p)
ii += 1
new_reduced = reduced.dimshuffle(*pattern)
if new_axis:
if type(node.op) == CAReduce:
                    # This happens for tt.max(), tt.min()
new_op = node.op.__class__(node.op.scalar_op, axis=new_axis)
else:
new_op = node.op.__class__(axis=new_axis)
return [new_op(new_reduced)]
else:
# -- in this case we can remove the reduction completely
return [new_reduced.astype(odtype)]
@register_specialize
@local_optimizer([Sum, Prod])
def local_opt_alloc(node):
"""
sum(alloc(constant,shapes...)) => constant*prod(shapes)
or
prod(alloc(constant,shapes...)) => constant**prod(shapes)
"""
if isinstance(node.op, Sum) or isinstance(node.op, Prod):
(node_inps,) = node.inputs
if node_inps.owner and isinstance(node_inps.owner.op, Alloc):
input = node_inps.owner.inputs[0]
shapes = node_inps.owner.inputs[1:]
try:
val = get_scalar_constant_value(input, only_process_constants=True)
assert val.size == 1
val = val.reshape(1)[0]
# check which type of op
size = mul(*shapes)
if input.dtype in ["float16", "float32"]:
                    # Shapes are ints and normally int64.
                    # We don't want to have a float64 upcast, and we
                    # don't want to downcast to float16, as we fear it
                    # could lose too much precision, which would be
                    # amplified by the mul/pow below.
size = size.astype("float32")
if node.op.axis is None or node.op.axis == tuple(range(input.ndim)):
if isinstance(node.op, Sum):
val = val * size
else:
val = val ** size
# Sum can change the input dtype (upcast or bool
# -> float32) by default or by user request.
# We can ignore the acc_dtype, as there is only 1
# elemwise we will do and not a sequence, so there is no
# accumulation of errors.
# So mostly, we just need to cast the output to the old
# dtype.
val = val.astype(node.outputs[0].dtype)
return [val]
to_prod = [shapes[i] for i in range(len(shapes)) if i in node.op.axis]
if to_prod:
size = mul(*to_prod)
if isinstance(node.op, Sum):
val *= size
else:
val = val ** size
# See comments above.
val = val.astype(node.outputs[0].dtype)
return [
alloc(
val,
*[
shapes[i]
for i in range(len(shapes))
if i not in node.op.axis
],
)
]
except NotScalarConstantError:
pass
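# Illustrative sketch (editor's addition, not part of the original module):
# `local_opt_alloc` turning a sum over an Alloc of a constant into a
# product of the allocated shape.
#
#   >>> import theano
#   >>> import theano.tensor as tt
#   >>> n = tt.iscalar("n")
#   >>> m = tt.iscalar("m")
#   >>> f = theano.function([n, m], tt.alloc(2.5, n, m).sum())
#   >>> # Instead of materializing an n x m array, the optimized graph
#   >>> # computes 2.5 * (n * m), cast back to the output dtype.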
@register_specialize
@local_optimizer([neg])
def local_neg_neg(node):
# other specializations shouldn't put this in,
# but sometimes they do
if node.op == neg:
if node.inputs[0].owner and node.inputs[0].owner.op == neg:
return [node.inputs[0].owner.inputs[0]]
@register_specialize
@local_optimizer([neg])
def local_neg_div_neg(node):
"""
- (-a / b) -> a / b
Also performs - (c / b) -> ((-c) / b) when c is a scalar constant.
"""
if node.op == neg:
if node.inputs[0].owner and node.inputs[0].owner.op == true_div:
frac = node.inputs[0]
num, denom = frac.owner.inputs
if num.owner and num.owner.op == neg:
if len(frac.clients) == 1:
# No other clients of the original division
new_num = num.owner.inputs[0]
return [true_div(new_num, denom)]
elif np.all(num.broadcastable) and isinstance(num, Constant):
if len(frac.clients) == 1:
new_num = -num.data
return [true_div(new_num, denom)]
@local_optimizer([mul])
def local_mul_zero(node):
"""
As part of canonicalization, we replace multiplication by zero
with zero.
"""
if node.op == mul:
otype = node.outputs[0].type
for i in node.inputs:
try:
value = get_scalar_constant_value(i)
except NotScalarConstantError:
continue
# print 'MUL by value', value, node.inputs
if value == 0:
# print '... returning zeros'
return _fill_chain(theano._asarray(0, dtype=otype.dtype), node.inputs)
register_canonicalize(local_mul_zero)
@local_optimizer([true_div])
def local_div_to_inv(node):
if node.op == true_div and np.all(
local_mul_canonizer.get_constant(node.inputs[0]) == 1.0
):
out = node.outputs[0]
new_out = inv(local_mul_canonizer.merge_num_denum(node.inputs[1:], []))
# The ones could have forced upcasting
if new_out.dtype != out.dtype:
new_out = tt.cast(new_out, dtype=out.dtype)
# The ones could have forced a specific length
if new_out.type != out.type:
new_out = broadcast_like(new_out, out, node.fgraph)
return [new_out]
else:
return False
register_specialize(local_div_to_inv)
@local_optimizer([inv])
def local_inv_canon(node):
if node.op == inv:
return [pow(node.inputs[0], -1.0)]
else:
return False
register_canonicalize(local_inv_canon)
@local_optimizer([pow])
def local_pow_canonicalize(node):
if node.op == pow:
cst = local_mul_canonizer.get_constant(node.inputs[1])
if cst == 0:
return [broadcast_like(1, node.outputs[0], node.fgraph)]
if cst == 1:
return [broadcast_like(node.inputs[0], node.outputs[0], node.fgraph)]
else:
return False
register_canonicalize(local_pow_canonicalize)
@register_specialize
@local_optimizer([mul])
def local_mul_to_sqr(node):
"""
x*x -> sqr(x)
This is faster on the GPU when memory fetching is a big part of
the computation time.
"""
if node.op == mul:
if len(node.inputs) == 2:
if node.inputs[0] is node.inputs[1]:
return [tt.sqr(node.inputs[0])]
@register_canonicalize
@local_optimizer([int_div])
def local_intdiv_by_one(node):
"""x // 1 -> x"""
if node.op in [int_div]:
if isinstance(node.inputs[1], tt.TensorConstant) and np.all(
node.inputs[1].value == 1
):
return [node.inputs[0].astype(node.outputs[0].dtype)]
@register_canonicalize
@register_specialize
@local_optimizer([int_div, true_div])
def local_zero_div(node):
"""0 / x -> 0"""
if isinstance(node.op, Elemwise) and isinstance(
node.op.scalar_op, (ts.IntDiv, ts.TrueDiv)
):
if local_mul_canonizer.get_constant(node.inputs[0]) == 0:
ret = broadcast_like(0, node.outputs[0], node.fgraph)
ret.tag.values_eq_approx = values_eq_approx_remove_nan
return [ret]
@local_optimizer([pow])
def local_pow_specialize(node):
# here, we are past the point of canonicalization, so we don't want
    # to put in unnecessary fills.
if node.op == pow:
# the idea here is that we have pow(x, y)
odtype = node.outputs[0].dtype
xsym = node.inputs[0]
ysym = node.inputs[1]
y = local_mul_canonizer.get_constant(ysym)
if (y is not None) and encompasses_broadcastable(
xsym.type.broadcastable, ysym.type.broadcastable
):
rval = None
if np.all(y == 2):
rval = [tt.sqr(xsym)]
if np.all(y == 1):
rval = [xsym]
if np.all(y == 0):
rval = [fill(xsym, np.asarray(1, dtype=odtype))]
if np.all(y == 0.5):
rval = [tt.sqrt(xsym)]
if np.all(y == -0.5):
rval = [inv(tt.sqrt(xsym))]
if np.all(y == -1):
rval = [inv(xsym)]
if np.all(y == -2):
rval = [inv(tt.sqr(xsym))]
if rval:
rval[0] = tt.cast(rval[0], odtype)
assert rval[0].type == node.outputs[0].type, (rval, node.outputs)
return rval
else:
return False
register_specialize(local_pow_specialize)
@register_specialize_device
@local_optimizer([pow])
def local_pow_specialize_device(node):
"""
    This optimization is not the same on all devices. We only do it on the CPU here.
"""
if node.op == pow:
# the idea here is that we have pow(x, y)
odtype = node.outputs[0].dtype
xsym = node.inputs[0]
ysym = node.inputs[1]
y = local_mul_canonizer.get_constant(ysym)
        # The next lines are needed to fix a strange case that I don't
        # know how to reproduce in a separate test.
        # It happens in the test_opt.py:test_log_erfc test.
        # y is an ndarray with dtype int8 and value 2, 4 or 6. This makes
        # the abs(y) <= 512 check fail!
        # Taking the value out of the ndarray solves the problem.
        # It could be that, in that case, NumPy does the comparison
        # in the wrong type (i.e. in int8, which overflows).
if isinstance(y, np.ndarray):
assert y.size == 1
try:
y = y[0]
except IndexError:
pass
if (y is not None) and encompasses_broadcastable(
xsym.type.broadcastable, ysym.type.broadcastable
):
rval = None
# 512 is too small for the cpu and too big for some gpu!
if abs(y) == int(abs(y)) and abs(y) <= 512:
pow2 = [xsym]
pow2_scal = [ts.get_scalar_type(xsym.dtype)()]
y_to_do = abs(y)
for i in range(int(np.log2(y_to_do))):
pow2.append(tt.sqr(pow2[i]))
pow2_scal.append(ts.sqr(pow2_scal[i]))
rval1 = None
rval1_scal = None
while y_to_do > 0:
log_to_do = int(np.log2(y_to_do))
if rval1:
rval1 *= pow2[log_to_do]
rval1_scal *= pow2_scal[log_to_do]
else:
rval1 = pow2[log_to_do]
rval1_scal = pow2_scal[log_to_do]
y_to_do -= 2 ** log_to_do
if abs(y) > 2:
# We fuse all the pow together here to make
# compilation faster
rval1 = Elemwise(
ts.Composite([pow2_scal[0]], [rval1_scal])
).make_node(xsym)
if y < 0:
rval = [inv(rval1)]
else:
rval = [rval1]
if rval:
rval[0] = tt.cast(rval[0], odtype)
assert rval[0].type == node.outputs[0].type, (rval, node.outputs)
return rval
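# The loop above evaluates x**|y| by repeated squaring. Below is a standalone
# sketch of the same strategy in plain Python/NumPy (no Theano graphs); the
# function name and arguments are illustrative only.
def _example_pow_by_repeated_squaring(x, y):
    """Sketch: compute x ** y for a positive integer y by repeated squaring,
    e.g. _example_pow_by_repeated_squaring(3.0, 6) == 729.0."""
    import numpy as np
    assert y == int(y) and y > 0
    y_to_do = int(y)
    pow2 = [x]  # pow2[i] holds x ** (2 ** i)
    for i in range(int(np.log2(y_to_do))):
        pow2.append(pow2[i] * pow2[i])
    result = None
    while y_to_do > 0:
        log_to_do = int(np.log2(y_to_do))
        result = pow2[log_to_do] if result is None else result * pow2[log_to_do]
        y_to_do -= 2 ** log_to_do
    return result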
@local_optimizer([mul])
def local_mul_specialize(node):
"""
Remove special-case constants from mul arguments and useless neg in inputs.
mul(-1, x) -> neg(x)
mul(1, x, y) -> mul(x, y)
mul(0, ...) -> alloc(0, shapes...)
This is not done if we would add more nodes in the graph, like with:
mul(-1, x, y) -/-> neg(mul(x, y))
"""
# here, we are past the point of canonicalization, so we don't
    # want to put in unnecessary fills.
#
# at this point [post canonicalize], mul() may have many inputs.
if node.op == mul:
        # the idea here is that we have mul(x, y, ...) possibly with constant or negated arguments
has_neg = False
new_inputs = []
nb_neg_node = 0
nb_cst = 0
for input in node.inputs:
# remove any neg arguments
while input.owner and input.owner.op == neg:
has_neg ^= True
input = input.owner.inputs[0]
nb_neg_node += 1
# remove special case arguments of 1, -1 or 0
y = local_mul_canonizer.get_constant(input)
if y == 1.0:
nb_cst += 1
elif y == -1.0:
nb_cst += 1
has_neg ^= True # toggles
elif y == 0.0:
# if we find any zero, we just return right away
return [broadcast_like(0, node.outputs[0], node.fgraph)]
else:
new_inputs.append(input)
if new_inputs != node.inputs:
if new_inputs:
if len(new_inputs) == 1:
if has_neg:
if new_inputs[0].dtype in (tt.uint_dtypes + ["bool"]):
return
else:
rval = -new_inputs[0]
else:
rval = new_inputs[0]
else:
# The next case would cause a replace by an equivalent case.
if has_neg and nb_neg_node == 0 and nb_cst == 1:
return
elif has_neg:
# Don't add an extra neg node as we can't
# fully replace this mul by a neg.
m1 = np.asarray(-1, dtype=node.outputs[0].dtype)
new_inputs = [m1] + new_inputs
rval = mul(*new_inputs)
return [broadcast_like(rval, node.outputs[0], node.fgraph)]
else:
# there are no variable inputs to mul
# N.B. this could have been constant-folded...
if has_neg:
return [broadcast_like(-1, node.outputs[0], node.fgraph)]
else:
return [broadcast_like(1, node.outputs[0], node.fgraph)]
register_specialize(local_mul_specialize)
@local_optimizer([add])
def local_add_specialize(node):
def fill_chain(v):
out = _fill_chain(v, node.inputs)
return out
# here, we are past the point of canonicalization, so we don't want
    # to put in unnecessary fills.
if node.op == add:
new_inputs = []
for input in node.inputs:
try:
y = get_scalar_constant_value(input)
except NotScalarConstantError:
y = input
if np.all(y == 0.0):
continue
new_inputs.append(input)
if len(new_inputs) < len(node.inputs):
dtype = node.outputs[0].type.dtype
if len(new_inputs) == 0:
# we got rid of the entire expression!
ndim = node.outputs[0].type.ndim
# Reuse call to constant for cache()
cst = tt.constant(np.zeros((1,) * ndim, dtype=dtype))
assert cst.type.broadcastable == (True,) * ndim
return fill_chain(cst)
if len(new_inputs) == 1:
ret = fill_chain(new_inputs[0])
else:
ret = fill_chain(add(*new_inputs))
# The dtype should not be changed. It can happen if the input
# that was forcing upcasting was equal to 0.
if ret[0].dtype != dtype:
ret = [tt.cast(ret[0], dtype)]
return ret
else:
return False
register_specialize(local_add_specialize)
mul_canonizer = in2out(
gof.LocalOptGroup(local_mul_canonizer, local_fill_sink, apply_all_opts=True),
name="mul_canonizer_groups",
)
def check_for_x_over_absX(numerators, denominators):
"""Convert x/abs(x) into sign(x). """
# TODO: this function should dig/search through dimshuffles
# This won't catch a dimshuffled absolute value
for den in list(denominators):
if den.owner and den.owner.op == abs_ and den.owner.inputs[0] in numerators:
if den.owner.inputs[0].type.dtype.startswith("complex"):
# TODO: Make an Op that projects a complex number to
# have unit length but projects 0 to 0. That
# would be a weird Op, but consistent with the
# special case below. I heard there's some
# convention in Matlab that is similar to
# this... but not sure.
pass
else:
denominators.remove(den)
numerators.remove(den.owner.inputs[0])
numerators.append(tt.sgn(den.owner.inputs[0]))
return numerators, denominators
local_mul_canonizer.add_simplifier(check_for_x_over_absX, "X_over_absX")
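# Quick numeric check of the identity used by the simplifier above: for any
# nonzero real x, x / abs(x) equals sign(x). Sample values are arbitrary.
def _example_x_over_absx_is_sign():
    """Sketch: x / abs(x) == sign(x) for nonzero real x (NumPy only)."""
    import numpy as np
    x = np.array([-3.0, -0.5, 0.25, 7.0])
    assert np.allclose(x / np.abs(x), np.sign(x))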
@register_canonicalize
@local_optimizer([abs_])
def local_abs_lift(node):
"""
Move the abs toward the input.
    This is needed for check_for_x_over_absX to apply in more cases.
"""
if node.op == abs_ and node.inputs[0].owner:
assert node.nin == 1
if node.inputs[0].owner.op == mul:
return [mul(*[abs_(i) for i in node.inputs[0].owner.inputs])]
if node.inputs[0].owner.op == true_div:
i = node.inputs[0].owner.inputs
return [true_div(abs_(i[0]), abs_(i[1]))]
@register_specialize
@local_optimizer([mul, true_div])
def local_abs_merge(node):
"""
    Merge the abs_ nodes generated by local_abs_lift when the canonizer
    doesn't need them anymore.
"""
if node.op == mul and sum([i.owner.op == abs_ for i in node.inputs if i.owner]) > 1:
inputs = []
for i in node.inputs:
if i.owner and i.owner.op == abs_:
inputs.append(i.owner.inputs[0])
elif isinstance(i, Constant):
try:
const = get_scalar_constant_value(i, only_process_constants=True)
except NotScalarConstantError:
return False
if not (const >= 0).all():
return False
inputs.append(i)
else:
return False
return [abs_(mul(*inputs))]
if (
node.op == true_div
and sum([i.owner.op == abs_ for i in node.inputs if i.owner]) == 2
):
return [
abs_(
true_div(node.inputs[0].owner.inputs[0], node.inputs[1].owner.inputs[0])
)
]
@register_stabilize
@register_specialize
@local_optimizer([log])
def local_log1p(node):
# log(1+x) -> log1p(x)
# log(1-x) -> log1p(-x)
if node.op == log:
(log_arg,) = node.inputs
if log_arg.owner and log_arg.owner.op == add:
scalars, scalar_inputs, nonconsts = scalarconsts_rest(
log_arg.owner.inputs, only_process_constants=True
)
# scalar_inputs are potentially dimshuffled and fill'd scalars
if scalars and np.allclose(np.sum(scalars), 1):
if nonconsts:
if len(nonconsts) > 1:
ninp = add(*nonconsts)
else:
ninp = nonconsts[0]
if ninp.dtype != log_arg.type.dtype:
ninp = ninp.astype(node.outputs[0].dtype)
return _fill_chain(log1p(ninp), scalar_inputs)
elif log_arg.owner and log_arg.owner.op == sub:
one = tt.extract_constant(
log_arg.owner.inputs[0], only_process_constants=True
)
if one != 1:
return
other = log_arg.owner.inputs[1]
if other.dtype != log_arg.dtype:
other = other.astype(log_arg.dtype)
return [log1p(neg(other))]
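# Why the log1p rewrite helps numerically: for tiny x, forming 1 + x first
# rounds away part of x before the log is taken, while log1p(x) keeps full
# precision. NumPy-only sketch with an illustrative value.
def _example_log1p_precision():
    """Sketch of the precision gain from rewriting log(1 + x) as log1p(x)."""
    import numpy as np
    x = 1e-12
    naive = np.log(1.0 + x)  # noticeably less accurate for tiny x
    stable = np.log1p(x)     # accurate to machine precision
    assert np.isclose(stable, x, rtol=1e-6)
    return naive, stable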
# TODO: in canonicalize, change log10 and log2 -> log
@register_stabilize
@register_specialize
@local_optimizer([log])
def local_log_add(node):
# log(exp(x)+exp(y))
#
# Suppose x >= y
# log(exp(x) + exp(y))
# log(exp(x) * (1 + exp(y)/exp(x)))
# x + log(1 + exp(y)/exp(x))
# x + log1p(exp(y)/exp(x))
# x + log1p(exp(y-x))
if node.op == log:
z = node.inputs[0]
if z.owner and z.owner.op == add:
zi = z.owner.inputs
if len(zi) != 2:
# -- upgrading Maximum to handle multiple inputs wasn't trivial
# TODO
# raise NotImplementedError()
return
pre_exp = [
x.owner.inputs[0] for x in zi if x.owner and x.owner.op == tt.exp
]
if len(pre_exp) == len(zi):
# all arguments to add are exp(<something>)
max_pre = tt.maximum(*pre_exp)
ret = max_pre + log1p(tt.exp(add(*[p - max_pre for p in pre_exp])))
ret.tag.values_eq_approx = values_eq_approx_remove_inf
return [ret]
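# Numeric sketch of the identity derived in the comment above:
# log(exp(x) + exp(y)) == max(x, y) + log1p(exp(min(x, y) - max(x, y))),
# which stays finite even when exp(x) itself would overflow. Values are
# illustrative; the check uses NumPy's logaddexp as a reference.
def _example_log_add_identity():
    """Sketch: stable evaluation of log(exp(x) + exp(y)) via the max shift."""
    import numpy as np
    x, y = 1000.0, 998.0  # np.exp(1000.0) overflows to inf in float64
    with np.errstate(over="ignore"):
        naive = np.log(np.exp(x) + np.exp(y))  # inf because of the overflow
    stable = max(x, y) + np.log1p(np.exp(min(x, y) - max(x, y)))
    assert np.isinf(naive)
    assert np.isclose(stable, np.logaddexp(x, y))
    return stable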
@local_optimizer([log])
def local_log_sum_exp(node):
# log(sum_i(exp(x_i))) = x_max + log(sum_i(exp(x_i - x_max)))
if node.op != log:
return
sum_node = node.inputs[0].owner
# If the sum has keepdims=True, there might be a dimshuffle
if sum_node and isinstance(sum_node.op, tt.DimShuffle):
dimshuffle_op = sum_node.op
sum_node = sum_node.inputs[0].owner
else:
dimshuffle_op = None
if not sum_node or not isinstance(sum_node.op, Sum):
return
exp_node, axis = sum_node.inputs[0].owner, sum_node.op.axis
if not exp_node or not (
isinstance(exp_node.op, Elemwise) and isinstance(exp_node.op.scalar_op, ts.Exp)
):
return
pre_exp = exp_node.inputs[0]
max_pre_exp = tt.max(pre_exp, axis=axis)
max_pre_exp_keepdims = tt.makeKeepDims(pre_exp, max_pre_exp, axis)
ret = max_pre_exp + log(tt.sum(tt.exp(pre_exp - max_pre_exp_keepdims), axis=axis))
# Restore the dimshuffle op, if any.
if dimshuffle_op:
ret = dimshuffle_op(ret)
return [ret]
compile.optdb.register(
"local_log_sum_exp",
in2out(local_log_sum_exp, ignore_newtrees=True),
1.6,
"fast_run",
)
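# The same max-shift trick, written out directly: subtracting the maximum
# before exponentiating keeps every exp() argument <= 0, so nothing overflows.
# NumPy-only sketch; the input values are illustrative.
def _example_log_sum_exp_shift():
    """Sketch: log(sum(exp(x))) == x_max + log(sum(exp(x - x_max)))."""
    import numpy as np
    x = np.array([1000.0, 999.0, 998.0])  # naive exp() would overflow
    x_max = x.max()
    stable = x_max + np.log(np.sum(np.exp(x - x_max)))
    assert np.isclose(stable, np.logaddexp.reduce(x))
    return stable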
def add_calculate(num, denum, aslist=False, out_type=None):
# TODO: make sure that this function and mul_calculate are similar
if out_type is None:
zero = 0.0
else:
zero = theano._asarray(0, dtype=out_type.dtype)
# zero = 0.0 if out_type is None else theano._asarray(0,
# dtype=out_type.dtype)
if out_type and out_type.dtype == "bool":
if len(denum) == 0:
            # NumPy 1.14 does not accept "bool - bool"
v = reduce(np.add, num, zero)
else:
raise Exception(
"bool subtraction not supported. This should not happen as"
" an earlier error should have been raised"
)
else:
v = reduce(np.add, num, zero) - reduce(np.add, denum, zero)
if aslist:
if np.all(v == 0):
return []
else:
return [v]
return v
local_add_canonizer = Canonizer(add, sub, neg, add_calculate)
add_canonizer = in2out(
gof.LocalOptGroup(local_add_canonizer, local_fill_sink, apply_all_opts=True),
name="add_canonizer_group",
)
register_canonicalize(local_add_canonizer, name="local_add_canonizer")
##################
# Distributivity #
##################
def distribute_greedy(pos_pairs, neg_pairs, num, denum, out_type, minscore=0):
# each pair in pos_pairs and neg_pairs is a num/denum pair. this
# function attempts to add num and denum to the corresponding parts
# of each pair, and counts how many multiplications/divisions can
# be saved in that way.
# each division is counted like div_cost multiplications
# (typically, division costs more so we are willing to multiply more
# in order to divide less)
# 1.5 was obtained through an informal test and may very well be
# platform dependent
div_cost = 1.5
# score is number of operations saved, higher is better
score = len(num) + div_cost * len(denum)
new_pos_pairs = list(
itertools.starmap(
local_mul_canonizer.simplify,
[(n + num, d + denum, out_type) for (n, d) in pos_pairs],
)
)
new_neg_pairs = list(
itertools.starmap(
local_mul_canonizer.simplify,
[(n + num, d + denum, out_type) for (n, d) in neg_pairs],
)
)
for (n, d), (nn, dd) in zip(pos_pairs + neg_pairs, new_pos_pairs + new_neg_pairs):
# We calculate how many operations we are saving with the new
# num and denum
score += len(n) + div_cost * len(d) - len(nn) - div_cost * len(dd)
if score <= minscore:
# the change is not applied because it adds too many operations
return False, pos_pairs, neg_pairs
return True, new_pos_pairs, new_neg_pairs
def attempt_distribution(factor, num, denum, out_type):
"""Try to insert each `num` and each `denum` in the factor?
Returns
-------
changes?, new_factor, new_num, new_denum
If there are changes, `new_num` and `new_denum` contain all the
numerators and denominators that could not be distributed in the factor
"""
pos_terms, neg_terms = local_add_canonizer.get_num_denum(factor)
if len(pos_terms) == 1 and not neg_terms:
return False, factor, num, denum
pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos_terms))
neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg_terms))
change = False
for n in list(num):
success, pos_pairs, neg_pairs = distribute_greedy(
pos_pairs, neg_pairs, [n], [], out_type
)
if success:
change = True
num.remove(n)
for d in list(denum):
success, pos_pairs, neg_pairs = distribute_greedy(
pos_pairs, neg_pairs, [], [d], out_type
)
if success:
change = True
denum.remove(d)
if not change:
return change, factor, num, denum
else:
return (
change,
local_add_canonizer.merge_num_denum(
list(itertools.starmap(local_mul_canonizer.merge_num_denum, pos_pairs)),
list(itertools.starmap(local_mul_canonizer.merge_num_denum, neg_pairs)),
),
num,
denum,
)
@register_canonicalize
@register_stabilize
@local_optimizer([mul, true_div, inv])
def local_greedy_distributor(node):
"""
Optimize by reducing the number of multiplications and/or divisions.
This optimization tries to apply distributivity of multiplication
to addition in order to reduce the number of multiplications
and/or divisions that must be done. The algorithm weighs division
more than multiplication to account for the former's slightly
greater computational cost.
The following expressions are simplified:
1. ((a/x + b/y) * x * y) --> a*y + b*x
2. ((a/x + b) * x) --> a + b*x
3. There are other forms too where node is a true_div.
The following expressions are not simplified:
4. ((a + b) * x) -/-> a*x + b*x
This optimization aims to reduce computational cost. It may also
increase numerical stability, e.g. when x and/or y tend to 0 in
example 1.
"""
out = node.outputs[0]
num, denum = local_mul_canonizer.get_num_denum(out)
if len(num) == 1 and not denum:
return False
new_num, new_denum = [], []
change = False
out_type = out.type
for candidate in list(num):
if candidate not in num:
continue
num.remove(candidate)
_change, candidate, num, denum = attempt_distribution(
candidate,
num,
denum,
out_type,
)
change |= _change
new_num.append(candidate)
for candidate in list(denum):
if candidate not in denum:
continue
denum.remove(candidate)
_change, candidate, denum, num = attempt_distribution(
candidate, denum, num, out_type
)
change |= _change
new_denum.append(candidate)
if not change:
return False
new_num += num
new_denum += denum
rval = local_mul_canonizer.merge_num_denum(new_num, new_denum)
if not (rval.type == out.type):
# WHY DOES THIS HAPPEN?
return False
return [rval]
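# Numeric spot-check of example 1 from the docstring above:
# ((a/x + b/y) * x * y) is algebraically a*y + b*x. Sample values are
# arbitrary nonzero floats; NumPy only.
def _example_greedy_distribution_identity():
    """Sketch: ((a/x + b/y) * x * y) == a*y + b*x."""
    import numpy as np
    a, b, x, y = 3.0, 5.0, 2.0, 7.0
    assert np.isclose((a / x + b / y) * x * y, a * y + b * x)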
@local_optimizer(None)
def constant_folding(node):
for input in node.inputs:
if not isinstance(input, Constant):
return False
# condition: all inputs are constant
if not node.op.do_constant_folding(node):
# The op asks not to be constant folded.
return False
storage_map = {i: [i.data] for i in node.inputs}
compute_map = {i: [True] for i in node.inputs}
for o in node.outputs:
storage_map[o] = [None]
compute_map[o] = [False]
impl = None
if hasattr(node.op, "python_constant_folding") and node.op.python_constant_folding(
node
):
impl = "py"
thunk = node.op.make_thunk(
node, storage_map, compute_map, no_recycling=[], impl=impl
)
required = thunk()
assert not required # a node whose inputs are all provided should always
# return successfully
rval = []
for output in node.outputs:
assert compute_map[output][0], (output, storage_map[output][0])
try:
constant = output.type.Constant
except AttributeError:
constant = Constant
v = constant(output.type, storage_map[output][0])
copy_stack_trace(output, v)
rval.append(v)
return rval
topo_constant_folding = in2out(
constant_folding, ignore_newtrees=True, name="topo_constant_folding"
)
register_canonicalize(topo_constant_folding, "fast_compile", final_opt=True)
register_uncanonicalize(topo_constant_folding, "fast_compile", final_opt=True)
register_stabilize(topo_constant_folding, "fast_compile", final_opt=True)
register_specialize(topo_constant_folding, "fast_compile", final_opt=True)
def get_clients(node):
"""
Used by erf/erfc opt to track less frequent op.
"""
return [c for c, i in node.outputs[0].clients if c != "output"]
def get_clients2(node):
"""
Used by erf/erfc opt to track less frequent op.
"""
l = []
for c, i in node.outputs[0].clients:
if c != "output":
for var in c.outputs:
l.extend([cc for cc, ii in var.clients if cc != "output"])
return l
# 1+erf(x)=>erfc(-x)
local_one_plus_erf = PatternSub(
(add, 1, (erf, "x")),
(erfc, (neg, "x")),
allow_multiple_clients=True,
name="local_one_plus_erf",
tracks=[erf],
get_nodes=get_clients,
)
register_canonicalize(local_one_plus_erf)
register_stabilize(local_one_plus_erf)
register_specialize(local_one_plus_erf)
# 1-erf(x)=>erfc(x)
local_one_minus_erf = PatternSub(
(sub, 1, (erf, "x")),
(erfc, "x"),
allow_multiple_clients=True,
name="local_one_minus_erf",
)
register_canonicalize(local_one_minus_erf)
register_stabilize(local_one_minus_erf)
register_specialize(local_one_minus_erf)
local_one_minus_erf2 = PatternSub(
(add, 1, (mul, -1, (erf, "x"))),
(erfc, "x"),
allow_multiple_clients=True,
name="local_one_minus_erf2",
)
register_canonicalize(local_one_minus_erf2)
register_stabilize(local_one_minus_erf2)
register_specialize(local_one_minus_erf2)
# 1+(-erf(x)) => erfc(x). This is a different graph than the previous one, as
# the canonicalization doesn't rewrite it completely.
local_one_plus_neg_erf = PatternSub(
(add, 1, (neg, (erf, "x"))),
(erfc, "x"),
allow_multiple_clients=True,
name="local_one_plus_neg_erf",
tracks=[erf],
get_nodes=get_clients2,
)
register_canonicalize(local_one_plus_neg_erf)
register_stabilize(local_one_plus_neg_erf)
register_specialize(local_one_plus_neg_erf)
# (-1)+erf(x) => -erfc(x). We don't need erf(x)+(-1), as canonicalization
# will put the -1 as the first argument.
local_erf_minus_one = PatternSub(
(add, -1, (erf, "x")),
(neg, (erfc, "x")),
allow_multiple_clients=True,
name="local_erf_minus_one",
tracks=[erf],
get_nodes=get_clients,
)
register_canonicalize(local_erf_minus_one)
register_stabilize(local_erf_minus_one)
register_specialize(local_erf_minus_one)
# 1-erfc(x) => erf(x)
local_one_minus_erfc = PatternSub(
(sub, 1, (erfc, "x")),
(erf, "x"),
allow_multiple_clients=True,
name="local_one_minus_erfc",
tracks=[erfc],
get_nodes=get_clients,
)
register_canonicalize(local_one_minus_erfc)
register_stabilize(local_one_minus_erfc)
register_specialize(local_one_minus_erfc)
local_one_minus_erfc2 = PatternSub(
(add, 1, (neg, (erfc, "x"))),
(erf, "x"),
allow_multiple_clients=True,
name="local_one_minus_erfc2",
tracks=[erfc],
get_nodes=get_clients2,
)
register_canonicalize(local_one_minus_erfc2)
register_stabilize(local_one_minus_erfc2)
register_specialize(local_one_minus_erfc2)
local_one_minus_erfc3 = PatternSub(
(add, 1, (mul, -1, (erfc, "x"))),
(erf, "x"),
allow_multiple_clients=True,
name="local_one_minus_erfc3",
tracks=[erfc],
get_nodes=get_clients2,
)
register_canonicalize(local_one_minus_erfc3)
register_stabilize(local_one_minus_erfc3)
register_specialize(local_one_minus_erfc3)
# 1+(-erfc(x)) => erf(x). This is a different graph than the previous one, as
# the canonicalization doesn't rewrite it completely.
local_one_add_neg_erfc = PatternSub(
(add, 1, (neg, (erfc, "x"))),
(erf, "x"),
allow_multiple_clients=True,
name="local_one_add_neg_erfc",
tracks=[erfc],
get_nodes=get_clients2,
)
register_canonicalize(local_one_add_neg_erfc)
register_stabilize(local_one_add_neg_erfc)
register_specialize(local_one_add_neg_erfc)
# (-1)+erfc(-x)=>erf(x)
local_erf_neg_minus_one = PatternSub(
(add, -1, (erfc, (neg, "x"))),
(erf, "x"),
allow_multiple_clients=True,
name="local_erf_neg_minus_one",
tracks=[erfc],
get_nodes=get_clients,
)
register_canonicalize(local_erf_neg_minus_one)
register_stabilize(local_erf_neg_minus_one)
register_specialize(local_erf_neg_minus_one)
# (-1)+erfc(-1*x)=>erf(x)
local_erf_neg_minus_one2 = PatternSub(
(add, -1, (erfc, (mul, -1, "x"))),
(erf, "x"),
allow_multiple_clients=True,
name="local_erf_neg_minus_one2",
tracks=[erfc],
get_nodes=get_clients,
)
register_canonicalize(local_erf_neg_minus_one2)
register_stabilize(local_erf_neg_minus_one2)
register_specialize(local_erf_neg_minus_one2)
@register_stabilize
@register_specialize
@local_optimizer([log])
def local_log_erfc(node):
"""Stability optimization for `log(erfc(x))`.
log(erfc(x)) => when x>threshold,
-x**2-log(x)-.5*log(pi)+log(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))
    for float64: threshold=26.641747557 was chosen with:
[(i,numpy.log(scipy.special.erfc(numpy.asarray([i],dtype='float64'))))
for i in numpy.arange(26.641747557,26.6417475571,.00000000001)]
for float32: threshold=10.0541949, [(i,numpy.log(scipy.special.erfc(
numpy.asarray([i],dtype='float32')))) for i in numpy.arange(
10.0541948,10.0541951,.0000001)]
"""
if node.op != log:
return False
if not node.inputs[0].owner or node.inputs[0].owner.op != erfc:
return False
if hasattr(node.tag, "local_log_erfc_applied"):
        # We use that flag to avoid applying the optimization recursively
return False
node.tag.local_log_erfc_applied = True
x = node.inputs[0].owner.inputs[0]
stab_value = (
-(x ** 2)
- log(x)
- 0.5 * log(np.pi)
+ log(1 - 1 / (2 * x ** 2) + 3 / (4 * x ** 4) - 15 / (8 * x ** 6))
)
if node.outputs[0].dtype == "float32" or node.outputs[0].dtype == "float16":
threshold = 10.0541949
elif node.outputs[0].dtype == "float64":
threshold = 26.641747557
ret = tt.switch(x < threshold, node.outputs[0], stab_value)
ret.tag.values_eq_approx = values_eq_approx_remove_inf
return [ret]
@register_stabilize
@register_specialize
@local_optimizer([true_div])
def local_grad_log_erfc_neg(node):
"""Stability optimization for the grad of `log(erfc(x))`.
([y*]exp(-(x**2)))/erfc(x) # The y* is optional
    ([y*]exp(x**2))/erfc(-x) => [y*](when x>threshold,
sqrt(pi)*-x/(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6)))
    for float64: threshold=26.63, see the end of the function for the explanation
    for float32: threshold=9.3, see the end of the function for the explanation
    TODO: remove the constraint that there are only 2 inputs to the mul and
    that exp(x**2) is the second one.
TODO: at the test point 10 in float32, there is instability in the original
value. The original gives -30.0, the stab -20.1 and in float64 -18.1.
Make it so that the test does not generate an error in that case!
"""
if node.op != true_div:
return False
if not node.inputs[1].owner or node.inputs[1].owner.op != erfc:
return False
erfc_in = node.inputs[1]
erfc_x = erfc_in.owner.inputs[0]
if not node.inputs[0].owner:
return False
# The mul is optional.
if node.inputs[0].owner.op != mul:
mul_in = None
y = []
if not node.inputs[0].owner or node.inputs[0].owner.op != tt.exp:
return False
exp_in = node.inputs[0]
else:
mul_in = node.inputs[0]
exp_in = None
for idx, inp in enumerate(mul_in.owner.inputs):
if inp.owner and inp.owner.op == tt.exp:
exp_in = inp
break
if len(mul_in.owner.inputs) == 2:
y = [mul_in.owner.inputs[1 - idx]]
else:
y = mul_in.owner.inputs[:]
del y[idx]
del mul_in
if not exp_in.owner.inputs[0].owner:
return False
if exp_in.owner.inputs[0].owner.op == neg:
neg_in = exp_in.owner.inputs[0]
if (
not neg_in.owner.inputs[0].owner
or neg_in.owner.inputs[0].owner.op != tt.sqr
):
return False
sqr_in = neg_in.owner.inputs[0]
x = sqr_in.owner.inputs[0]
elif exp_in.owner.inputs[0].owner.op == mul:
# We should compare that -(erfc_x**2) is equivalent to mul_neg.
# There is currently no easy way to do this in the general case,
        # so we implement some common cases for now.
        # In many cases the neg is replaced by a mul in the graph.
        # This also allows us to stabilize log(erfc(cst*x)).
mul_neg = exp_in.owner.inputs[0]
# In case that multiple mul are not fused together, we do it here.
def check_input(inputs):
new_inputs = []
for i in inputs:
if i.owner and i.owner.op == mul:
new_inputs.extend(check_input(i.owner.inputs))
else:
new_inputs.append(i)
return new_inputs
mul_inputs = check_input(mul_neg.owner.inputs)
# Put the constant first.
for i in range(len(mul_inputs)):
if isinstance(i, Constant):
if i == 0:
break
else:
tmp = mul_inputs[0]
mul_inputs[0] = mul_inputs[i]
mul_inputs[i] = tmp
break
mul_neg = mul(*mul_inputs)
try:
cst2 = get_scalar_constant_value(
mul_neg.owner.inputs[0], only_process_constants=True
)
except NotScalarConstantError:
return False
if len(mul_neg.owner.inputs) == 2:
if (
not mul_neg.owner.inputs[1].owner
or mul_neg.owner.inputs[1].owner.op != tt.sqr
):
return False
sqr_in = mul_neg.owner.inputs[1]
x = sqr_in.owner.inputs[0]
elif len(mul_neg.owner.inputs) == 3:
if mul_neg.owner.inputs[1] is not mul_neg.owner.inputs[2]:
return False
x = mul_neg.owner.inputs[1]
else:
return False
if cst2 != -1:
if (
not erfc_x.owner
or erfc_x.owner.op != mul
or len(erfc_x.owner.inputs) != 2
):
# todo implement that case
return False
if erfc_x.owner.inputs[1] is not mul_neg.owner.inputs[1]:
return False
x = erfc_x
try:
cst = get_scalar_constant_value(
erfc_x.owner.inputs[0], only_process_constants=True
)
except NotScalarConstantError:
return False
if cst2 != -cst * 2:
return False
# The constant is valid. Must check that the
elif erfc_x is not x:
return False
else:
return False
if hasattr(node.tag, "local_grad_log_erfc_neg"):
        # We use that flag to avoid applying the optimization recursively
return False
# we move the y outside the div.
true_div_no_mul = true_div(exp_in, erfc_in)
true_div_no_mul.owner.tag.local_grad_log_erfc_neg = True
# aaron value
stab_value = (
x
* pow(1 - 1 / (2 * (x ** 2)) + 3 / (4 * (x ** 4)) - 15 / (8 * (x ** 6)), -1)
* tt.cast(tt.sqrt(np.pi), dtype=x.dtype)
)
if x.dtype == "float32" or x.dtype == "float16":
threshold = 9.3
# threshold = 10.1
elif x.dtype == "float64":
threshold = 26.641747557
ret = tt.switch(x < threshold, true_div_no_mul, stab_value)
if y:
ret = mul(ret, *y)
ret.tag.values_eq_approx = values_eq_approx_remove_inf_nan
return [ret]
def local_elemwise_fusion_op(op_class, max_input_fct=lambda node: 32, maker=None):
"""Create a recursive function that fuses `Elemwise` `Op`s.
The basic idea is that we loop through an `Elemwise` node's inputs, find
other `Elemwise` nodes, determine the scalars input types for all of the
`Elemwise` `Op`s, construct a new scalar `Op` using the scalar input types
and each `Elemwise`'s scalar `Op`, and use the composite scalar `Op` in a
new "fused" `Elemwise`.
It's parameterized in order to work for `Elemwise` and `GpuElemwise` `Op`s.
Parameters
----------
op_class : type
`GpuElemwise` or `Elemwise` class (the one that we want to fuse)
max_input_fct : callable
A function that returns the maximum number of inputs that this `Elemwise`
can take (useful for `GpuElemwise`). The GPU kernel currently has a
limit of 256 bytes for the size of all parameters passed to it. As
currently we pass a lot of information only by parameter, we must limit how
many `Op`s we fuse together to avoid busting that 256 limit.
        On the CPU we limit ourselves to 32 input variables, since that is the
        maximum that NumPy supports.
maker: callable
A function with the signature `(node, *args)` that constructs an
`op_class` instance (e.g. `op_class(*args)`).
"""
if maker is None:
def maker(node, scalar_op):
return op_class(scalar_op)
def local_fuse(node):
"""Fuse `Elemwise` `Op`s in a node.
As part of specialization, we fuse two consecutive elemwise `Op`s of the
same shape.
For mixed dtype, we let the `Composite` `Op` do the cast. It lets the C
compiler do the cast.
The number of dimensions is validated at call time by Theano itself.
"""
# META TODO: PUT THESE THINGS IN TRAC, NOT TODO NOTES!!
# TODO: use broadcast flag?
# TODO: don't do this optimization as a localOptimizer.
# Analyze the graph in terms of elemwise subgraphs, and then
# replace each subgraph with a Composite version.
# TODO: use malloc and copy to transfer arguments that don't
# fit within the parameter space of 256 bytes
#
# TODO: Merge with multiple output to merge when an inputs
# have multiple clients. This can't be done with a local
# optimiser.
# TODO: Related: Support composites with multiple outputs
# TODO: Use Composite to combine Elemwise and Reduce
# operations. We have to loop over the data anyway... might
# as well sum it up while we're at it (this can be trickier
        # than I'm making it sound here. The data-traversal should be
# done contiguously, and the summing-up might not be easy or
# worthwhile if the summation axis doesn't line up with a
# contiguous dimension)
if type(node.op) is not op_class:
return False
if len(node.outputs) > 1:
# We don't support fusion for nodes with multiple outputs.
return
inputs = [] # inputs of the new Elemwise op.
s_inputs = [] # inputs of the new scalar op used by the Composite.
# Inputs of the new scalar op that represents the current node.
s_g = []
# There is a hard limit of 256 bytes for the formal argument list to a
# GPU kernel function.
max_nb_input = max_input_fct(node)
# The number of inputs to the new fused op if we do not fuse more
# inputs.
new_nb_input = len(node.inputs)
# Did we fuse something?
        # Needed as we can fuse unary ops that don't change the number of
# inputs.
# And there is a case where the inputs are the same as the current
# node. That won't change the number of inputs of the new op.
fused = False
for i in node.inputs:
do_fusion = False
# Will store inputs of the fused node that are not currently inputs
# of the node we want to create (to avoid duplicating inputs).
tmp_input = []
# Same as tmp_input, but for scalars.
tmp_scalar = []
            # We should not check the number of inputs here,
            # as fusing ops doesn't always change the number of inputs.
            # If a variable is used multiple times as input to the same node,
            # we still want to fuse it. So we take the set.
if (
i.owner
and isinstance(i.owner.op, op_class)
and len({n for n, idx in i.clients}) == 1
and
                # Do not merge elemwise ops that don't have the same
                # broadcastable pattern, to avoid redoing duplicate
                # computation due to broadcasting.
i.owner.outputs[0].broadcastable == node.outputs[0].broadcastable
):
try:
tmp_s_input = []
# we should not put duplicate input into s_inputs and inputs
for ii in i.owner.inputs:
if ii in inputs:
tmp_s_input.append(s_inputs[inputs.index(ii)])
elif ii in tmp_input:
tmp_s_input.append(tmp_scalar[tmp_input.index(ii)])
else:
tmp = ts.get_scalar_type(ii.dtype).make_variable()
try:
tv = gof.op.get_test_value(ii)
if tv.size > 0:
tmp.tag.test_value = tv.flatten()[0]
else:
_logger.warning(
"Cannot construct a scalar test value"
" from a test value with no size: {}".format(ii)
)
except TestValueError:
pass
tmp_s_input.append(tmp)
tmp_input.append(ii)
tmp_scalar.append(tmp_s_input[-1])
s_op = i.owner.op.scalar_op(*tmp_s_input, return_list=True)
                    # if the scalar_op doesn't have a C implementation,
# we skip its fusion to allow the fusion of the
# other ops.
i.owner.op.scalar_op.c_code(
s_op[0].owner,
"test_presence_of_c_code",
["x" for x in i.owner.inputs],
["z" for z in i.owner.outputs],
{"fail": "%(fail)s"},
)
do_fusion = True
except (NotImplementedError, MethodNotDefined):
_logger.warning(
(
"%s does not implement the c_code function."
" As well as being potentially slow, this"
" disables loop fusion of this op."
)
% str(i.owner.op.scalar_op)
)
do_fusion = False
# Compute the number of inputs in case we fuse this input.
# We subtract 1 because we replace the existing input with the new
# inputs from `tmp_input`.
new_nb_input_ = new_nb_input + len(tmp_input) - 1
# If the new input is already an input of the current node, it was
# already counted when `new_nb_input` was initialized to
# len(node.inputs).
# This can happen when a variable is used both by the Elemwise to
# fuse and the current node.
for x in tmp_input:
if x in node.inputs:
new_nb_input_ -= 1
if do_fusion and (new_nb_input_ <= max_nb_input):
fused = True
new_nb_input = new_nb_input_
inputs.extend(tmp_input)
s_inputs.extend(tmp_scalar)
s_g.extend(s_op)
else:
# We must support the case where the same variable appears many
# times within the inputs
if inputs.count(i) == node.inputs.count(i):
s = s_inputs[inputs.index(i)]
else:
s = ts.get_scalar_type(i.dtype).make_variable()
try:
if theano.config.compute_test_value != "off":
v = gof.op.get_test_value(i)
if v.size > 0:
s.tag.test_value = v.flatten()[0]
except TestValueError:
pass
inputs.append(i)
s_inputs.append(s)
s_g.append(s)
if not fused:
return False
if new_nb_input != len(inputs) or len(s_inputs) != len(inputs):
raise Exception(
"""Something has gone wrong with the elemwise
fusion optimization. We skip this optimization. You can ignore this message,
your code will run correctly, but may be slower."""
)
s_new_out = node.op.scalar_op(*s_g, return_list=True)
try:
s_new_out[0].owner.op.c_code(
s_new_out[0].owner,
"test_presence_of_c_code",
["x" for x in s_g],
["z" for x in s_new_out],
{"fail": "%(fail)s"},
)
except (NotImplementedError, MethodNotDefined):
_logger.warning(
(
"%s does not implement the c_code function."
" As well as being potentially slow, this disables "
"loop fusion of this op."
)
% str(s_new_out[0].owner.op)
)
# create the composite op.
composite_op = ts.Composite(s_inputs, s_new_out)
# create the new node.
# Do not call make_node to have test_value
new_node = maker(node, composite_op)(*inputs).owner
assert len(new_node.outputs) == 1
assert node.outputs[0].dtype == new_node.outputs[0].dtype
if len(new_node.inputs) > max_nb_input:
_logger.warning(
"loop fusion failed because Op would exceed" " kernel argument limit."
)
return False
        # We fuse as many ops as we can at the same time to make debug mode
        # faster: debug mode will be faster as it won't test every intermediate step.
while True:
ret = local_fuse(new_node)
if ret is not False and ret is not None:
assert len(ret) == len(new_node.outputs)
assert len(ret) == 1
new_node = ret[0].owner
else:
break
return new_node.outputs
return local_fuse
def elemwise_max_input_fct(node):
    # Elemwise.perform uses NumPy ufuncs, which are limited to 31
    # inputs.
if not theano.config.cxx:
return 31
return 1024
local_elemwise_fusion = local_elemwise_fusion_op(Elemwise, elemwise_max_input_fct)
class FusionOptimizer(Optimizer):
"""Graph optimizer for Fusion of elemwise operations."""
def __init__(self, local_optimizer):
Optimizer.__init__(self)
self.optimizer = local_optimizer
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
did_something = True
nb_iter = 0
nb_replacement = 0
nb_inconsistency_replace = 0
time_toposort = 0
if fgraph.profile:
validate_before = fgraph.profile.validate_time
callbacks_before = fgraph.execute_callbacks_times.copy()
callback_before = fgraph.execute_callbacks_time
while did_something:
t0 = time.time()
nodelist = list(fgraph.toposort())
time_toposort += time.time() - t0
nodelist.reverse()
did_something = False
for node in nodelist:
# Don't try to fuse node that have already been fused.
if node in fgraph.apply_nodes:
new_outputs = self.optimizer(node)
if new_outputs:
assert len(new_outputs) == len(node.outputs)
try:
fgraph.replace_all_validate(
list(zip(node.outputs, new_outputs)),
reason=self.__class__.__name__,
)
did_something = True
nb_replacement += 1
except InconsistencyError:
nb_inconsistency_replace += 1
nb_iter += 1
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
callback_time = fgraph.execute_callbacks_time - callback_before
callbacks_time = {}
for k, v in fgraph.execute_callbacks_times.items():
if k in callbacks_before:
callbacks_time[k] = v - callbacks_before[k]
else:
callbacks_time[k] = v
else:
validate_time = None
callback_time = None
callbacks_time = {}
return (
self,
nb_iter,
nb_replacement,
nb_inconsistency_replace,
validate_time,
callback_time,
callbacks_time,
time_toposort,
)
@staticmethod
def print_profile(stream, prof, level=0):
blanc = " " * level
print(blanc, "FusionOptimizer", file=stream)
print(blanc, " nb_iter", prof[1], file=stream)
print(blanc, " nb_replacement", prof[2], file=stream)
print(blanc, " nb_inconsistency_replace", prof[3], file=stream)
print(blanc, " validate_time", prof[4], file=stream)
print(blanc, " callback_time", prof[5], file=stream)
if prof[5] > 1:
print(blanc, " callbacks_time", file=stream)
for i in sorted(prof[6].items(), key=lambda a: a[1])[::-1]:
if i[1] > 0:
print(blanc, " ", i)
print(blanc, " time_toposort", prof[7], file=stream)
def local_add_mul_fusion(node):
"""Fuse consecutive add or mul in one such node with more inputs.
It is better to fuse add/mul that way then in a Composite node as
this make the inner graph of the Composite smaller. This allow to
put more computation in a Composite before hitting the max
recusion limit when pickling Composite.
"""
if not isinstance(node.op, Elemwise) or not isinstance(
node.op.scalar_op, (ts.Add, ts.Mul)
):
return False
s_op = node.op.scalar_op.__class__
new_inp = []
fused = False
nb_inputs = len(node.inputs)
max_inputs = float("inf")
if hasattr(node.op, "max_inputs"):
max_inputs = node.op.max_inputs(node)
for inp in node.inputs:
if (
inp.owner
and isinstance(inp.owner.op, Elemwise)
and isinstance(inp.owner.op.scalar_op, s_op)
and
# Do not duplicate the operation.
len(inp.clients) == 1
and (nb_inputs + len(inp.owner.inputs) - 1) <= max_inputs
):
new_inp.extend(inp.owner.inputs)
fused = True
else:
new_inp.append(inp)
# We can not compare the number of inputs as Mul and Add could have
# 0 or 1 inputs in some corner cases.
if fused:
output = node.op(*new_inp)
copy_stack_trace(node.outputs[0], output)
# Do the recursion here to help lower the number of
        # FusionOptimizer iterations.
if output.owner:
output2 = local_add_mul_fusion(output.owner)
if output2:
return output2
return [output]
if config.tensor.local_elemwise_fusion:
_logger.debug("enabling optimization fusion elemwise in fast_run")
# Must be after gpu(48.5) and before AddDestroyHandler(49.5)
fuse_seqopt = gof.SequenceDB()
fuse_seqopt.register(
"local_add_mul_fusion",
FusionOptimizer(local_add_mul_fusion),
0,
"fast_run",
"fusion",
)
fuse_seqopt.register(
"composite_elemwise_fusion",
FusionOptimizer(local_elemwise_fusion),
1,
"fast_run",
"fusion",
)
compile.optdb.register(
"elemwise_fusion",
fuse_seqopt,
49,
"fast_run",
"fusion",
"local_elemwise_fusion",
"FusionOptimizer",
)
else:
_logger.debug("not enabling optimization fusion elemwise in fast_run")
compile.optdb.register(
"elemwise_fusion",
FusionOptimizer(local_elemwise_fusion),
49,
"fusion",
"local_elemwise_fusion",
"FusionOptimizer",
)
@register_canonicalize
@local_optimizer([Elemwise])
def local_useless_composite(node):
"""For elemwise Composite that have multiple outputs, remove the
outputs that are not used.
"""
if not isinstance(node.op, Elemwise) or not isinstance(
node.op.scalar_op, ts.Composite
):
return
comp = node.op.scalar_op
idx = [i for i, o_extern in enumerate(node.outputs) if o_extern.clients]
if len(idx) < len(node.outputs):
new_outputs = [comp.outputs[i] for i in idx]
c = ts.Composite(inputs=comp.inputs, outputs=new_outputs)
e = Elemwise(scalar_op=c)(*node.inputs, return_list=True)
return dict(zip([node.outputs[i] for i in idx], e))
# ############################
# # Remove consider_constant #
# ############################
# Although the ops ConsiderConstant, ZeroGrad and DisconnectedGrad
# just return the input, they should be removed from the graph so
# that other optimizations can still be applied.
@register_canonicalize("fast_compile")
@register_useless("fast_compile")
@local_optimizer(None)
def local_view_op(node):
if isinstance(node.op, theano.compile.ops.ViewOp):
return node.inputs
@register_useless
@register_canonicalize
@register_stabilize
@register_specialize
@local_optimizer([Alloc])
def local_merge_alloc(node):
# This opt takes care of several cases:
# Alloc(Alloc(m, x, 1, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)
# Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)
# Alloc(Alloc(m, y1, 1, 1), x, y2, z, w) -> Alloc(m, x, assert(y1, y1==y2), z, w)
if not isinstance(node.op, Alloc):
return False
if not node.inputs[0].owner or not isinstance(node.inputs[0].owner.op, Alloc):
return False
inputs_outer = node.inputs
inputs_inner = node.inputs[0].owner.inputs
dims_outer = inputs_outer[1:]
dims_inner = inputs_inner[1:]
dims_outer_rev = dims_outer[::-1]
dims_inner_rev = dims_inner[::-1]
# check if the pattern of broadcasting is matched, in the reversed ordering.
    # The reverse ordering is needed when an Alloc adds implicit new
    # broadcasted dimensions to its inputs[0]. E.g.:
# Alloc(Alloc(m, y, 1, 1), x, y, z, w) -> Alloc(m, x, y, z, w)
i = 0
for dim_inner, dim_outer in zip(dims_inner_rev, dims_outer_rev):
if dim_inner != dim_outer:
if isinstance(dim_inner, Constant) and dim_inner.data == 1:
pass
else:
dims_outer[-1 - i] = Assert(
"You have a shape error in your graph. To see a better"
" error message and a stack trace of where in your code"
" the error is created, use the Theano flags"
" optimizer=None or optimizer=fast_compile."
)(dim_outer, tt.eq(dim_outer, dim_inner))
i += 1
return [alloc(inputs_inner[0], *dims_outer)]
@register_useless("fast_compile")
@local_optimizer([TopKOp])
def local_useless_topk(node):
"""
    TopKOp generates two outputs by default.
    This opt removes the output that is not used.
"""
op = node.op
if not isinstance(op, TopKOp):
return
if not (op.return_values and op.return_indices):
return False
x, k = node.inputs
ret_val = bool(node.outputs[0].clients)
ret_idx = bool(node.outputs[1].clients)
if not (ret_val ^ ret_idx):
# both true -> nothing to remove
# both false -> let pruner handle
return False
old_output = node.outputs[ret_idx]
new_output = TopKOp(
axis=op.axis,
sorted=op.sorted,
idx_dtype=op.idx_dtype,
return_values=ret_val,
return_indices=ret_idx,
)(x, k)
copy_stack_trace(node.outputs[0], new_output)
return {old_output: new_output}
| 36.11871
| 109
| 0.567642
|
e66eb5c049e6a4d420c1e01f88b24ffdcd60c6fa
| 10,411
|
py
|
Python
|
src/python/m5/util/convert.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 765
|
2015-01-14T16:17:04.000Z
|
2022-03-28T07:46:28.000Z
|
src/python/m5/util/convert.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 148
|
2018-07-20T00:58:36.000Z
|
2021-11-16T01:52:33.000Z
|
src/python/m5/util/convert.py
|
hyu-iot/gem5
|
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
|
[
"BSD-3-Clause"
] | 807
|
2015-01-06T09:55:38.000Z
|
2022-03-30T10:23:36.000Z
|
# Copyright (c) 2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# metric prefixes
atto = 1.0e-18
femto = 1.0e-15
pico = 1.0e-12
nano = 1.0e-9
micro = 1.0e-6
milli = 1.0e-3
kilo = 1.0e3
mega = 1.0e6
giga = 1.0e9
tera = 1.0e12
peta = 1.0e15
exa = 1.0e18
# power of 2 prefixes
kibi = 1024
mebi = kibi * 1024
gibi = mebi * 1024
tebi = gibi * 1024
pebi = tebi * 1024
exbi = pebi * 1024
metric_prefixes = {
'Ei': exbi,
'E': exa,
'Pi': pebi,
'P': peta,
'Ti': tebi,
'T': tera,
'Gi': gibi,
'G': giga,
'M': mega,
'Ki': kibi,
'k': kilo,
'Mi': mebi,
'm': milli,
'u': micro,
'n': nano,
'p': pico,
'f': femto,
'a': atto,
}
binary_prefixes = {
'Ei': exbi,
'E' : exbi,
'Pi': pebi,
'P' : pebi,
'Ti': tebi,
'T' : tebi,
'Gi': gibi,
'G' : gibi,
'Mi': mebi,
'M' : mebi,
'Ki': kibi,
'k' : kibi,
}
def assertStr(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
def _split_suffix(value, suffixes):
'''Split a string based on a suffix from a list of suffixes.
:param value: String value to test for a matching suffix.
:param suffixes: Container of suffixes to test.
:returns: A tuple of (value, suffix). Suffix is the empty string
if there is no match.
'''
matches = [ sfx for sfx in suffixes if value.endswith(sfx) ]
assert len(matches) <= 1
return (value[:-len(matches[0])], matches[0]) if matches \
else (value, '')
def toNum(value, target_type, units, prefixes, converter):
'''Convert a string using units and prefixes to (typically) a float or
integer.
String values are assumed to either be a naked magnitude without a
unit or prefix, or a magnitude with a unit and an optional prefix.
:param value: String value to convert.
:param target_type: Type name for error messages.
:param units: Unit (string) or list of valid units.
:param prefixes: Mapping of prefixes to multipliers.
:param converter: Helper function to convert magnitude to native
type.
:returns: Tuple of (converted value, unit)
'''
assertStr(value)
def convert(val):
try:
return converter(val)
except ValueError:
raise ValueError(
"cannot convert '%s' to %s" % (value, target_type))
# Units can be None, the empty string, or a list/tuple. Convert
# to a tuple for consistent handling.
if not units:
units = tuple()
elif isinstance(units, str):
units = (units,)
else:
units = tuple(units)
magnitude_prefix, unit = _split_suffix(value, units)
# We only allow a prefix if there is a unit
if unit:
magnitude, prefix = _split_suffix(magnitude_prefix, prefixes)
scale = prefixes[prefix] if prefix else 1
else:
magnitude, prefix, scale = magnitude_prefix, '', 1
return convert(magnitude) * scale, unit
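# A small usage sketch of toNum (the input strings below are illustrative):
# a magnitude with a metric prefix and a unit is split into its pieces and
# scaled, while a bare magnitude passes through unchanged.
def _example_toNum_usage():
    '''Illustrative sketch of toNum with the metric prefix table.'''
    value, unit = toNum('2kB', target_type='float', units='B',
                        prefixes=metric_prefixes, converter=float)
    assert (value, unit) == (2000.0, 'B')
    # A bare magnitude has no unit and therefore no prefix scaling.
    assert toNum('42', 'float', None, metric_prefixes, float) == (42.0, '')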
def toFloat(value, target_type='float', units=None, prefixes=[]):
return toNum(value, target_type, units, prefixes, float)[0]
def toMetricFloat(value, target_type='float', units=None):
return toFloat(value, target_type, units, metric_prefixes)
def toBinaryFloat(value, target_type='float', units=None):
return toFloat(value, target_type, units, binary_prefixes)
def toInteger(value, target_type='integer', units=None, prefixes=[]):
return toNum(value, target_type, units, prefixes,
lambda x: int(x, 0))[0]
def toMetricInteger(value, target_type='integer', units=None):
return toInteger(value, target_type, units, metric_prefixes)
def toBinaryInteger(value, target_type='integer', units=None):
return toInteger(value, target_type, units, binary_prefixes)
def toBool(value):
assertStr(value)
value = value.lower()
if value in ('true', 't', 'yes', 'y', '1'):
return True
if value in ('false', 'f', 'no', 'n', '0'):
return False
raise ValueError("cannot convert '%s' to bool" % value)
def toFrequency(value):
return toMetricFloat(value, 'frequency', 'Hz')
def toLatency(value):
return toMetricFloat(value, 'latency', 's')
def anyToLatency(value):
"""Convert a magnitude and unit to a clock period."""
magnitude, unit = toNum(value,
target_type='latency',
units=('Hz', 's'),
prefixes=metric_prefixes,
converter=float)
if unit == 's':
return magnitude
elif unit == 'Hz':
try:
return 1.0 / magnitude
except ZeroDivisionError:
raise ValueError(f"cannot convert '{value}' to clock period")
else:
raise ValueError(f"'{value}' needs a valid unit to be unambiguous.")
def anyToFrequency(value):
"""Convert a magnitude and unit to a clock frequency."""
magnitude, unit = toNum(value,
target_type='frequency',
units=('Hz', 's'),
prefixes=metric_prefixes,
converter=float)
if unit == 'Hz':
return magnitude
elif unit == 's':
try:
return 1.0 / magnitude
except ZeroDivisionError:
raise ValueError(f"cannot convert '{value}' to frequency")
else:
raise ValueError(f"'{value}' needs a valid unit to be unambiguous.")
def toNetworkBandwidth(value):
return toMetricFloat(value, 'network bandwidth', 'bps')
def toMemoryBandwidth(value):
return toBinaryFloat(value, 'memory bandwidth', 'B/s')
def toMemorySize(value):
return toBinaryInteger(value, 'memory size', 'B')
def toIpAddress(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
bytes = value.split('.')
if len(bytes) != 4:
raise ValueError('invalid ip address %s' % value)
for byte in bytes:
if not 0 <= int(byte) <= 0xff:
raise ValueError('invalid ip address %s' % value)
return (int(bytes[0]) << 24) | (int(bytes[1]) << 16) | \
(int(bytes[2]) << 8) | (int(bytes[3]) << 0)
def toIpNetmask(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
(ip, netmask) = value.split('/')
ip = toIpAddress(ip)
netmaskParts = netmask.split('.')
if len(netmaskParts) == 1:
if not 0 <= int(netmask) <= 32:
raise ValueError('invalid netmask %s' % netmask)
return (ip, int(netmask))
elif len(netmaskParts) == 4:
netmaskNum = toIpAddress(netmask)
if netmaskNum == 0:
return (ip, 0)
testVal = 0
for i in range(32):
testVal |= (1 << (31 - i))
if testVal == netmaskNum:
return (ip, i + 1)
raise ValueError('invalid netmask %s' % netmask)
else:
raise ValueError('invalid netmask %s' % netmask)
def toIpWithPort(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
(ip, port) = value.split(':')
ip = toIpAddress(ip)
if not 0 <= int(port) <= 0xffff:
raise ValueError('invalid port %s' % port)
return (ip, int(port))
def toVoltage(value):
return toMetricFloat(value, 'voltage', 'V')
def toCurrent(value):
return toMetricFloat(value, 'current', 'A')
def toEnergy(value):
return toMetricFloat(value, 'energy', 'J')
def toTemperature(value):
"""Convert a string value specified to a temperature in Kelvin"""
magnitude, unit = toNum(value,
target_type='temperature',
units=('K', 'C', 'F'),
prefixes=metric_prefixes,
converter=float)
if unit == 'K':
kelvin = magnitude
elif unit == 'C':
kelvin = magnitude + 273.15
elif unit == 'F':
kelvin = (magnitude + 459.67) / 1.8
else:
raise ValueError(f"'{value}' needs a valid temperature unit.")
if kelvin < 0:
raise ValueError(f"{value} is an invalid temperature")
return kelvin
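# A few end-to-end conversions using the helpers defined above (the input
# strings are illustrative). The expected numbers follow directly from the
# prefix tables at the top of this file.
def _example_conversions():
    '''Illustrative round-up of the converters in this module.'''
    assert toMemorySize('2GiB') == 2 * gibi
    assert abs(toFrequency('1GHz') - 1.0e9) < 1.0
    assert toIpAddress('10.0.0.1') == ((10 << 24) | 1)
    assert toIpNetmask('10.0.0.0/8') == ((10 << 24), 8)
    assert abs(toTemperature('25C') - 298.15) < 1.0e-9
    assert toBool('Yes') is True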
| 32.033846
| 76
| 0.6326
|
ee3ef8c6d770029d574dee8516a8c2f54ac4b511
| 4,749
|
py
|
Python
|
ce_api/models/body_update_loggedin_user_api_v1_users_put.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 7
|
2020-10-13T12:47:32.000Z
|
2021-03-12T12:00:14.000Z
|
ce_api/models/body_update_loggedin_user_api_v1_users_put.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | null | null | null |
ce_api/models/body_update_loggedin_user_api_v1_users_put.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 1
|
2021-01-23T02:19:42.000Z
|
2021-01-23T02:19:42.000Z
|
# coding: utf-8
"""
maiot Core Engine API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BodyUpdateLoggedinUserApiV1UsersPut(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'password': 'str',
'full_name': 'str',
'email': 'str'
}
attribute_map = {
'password': 'password',
'full_name': 'full_name',
'email': 'email'
}
def __init__(self, password=None, full_name=None, email=None): # noqa: E501
"""BodyUpdateLoggedinUserApiV1UsersPut - a model defined in Swagger""" # noqa: E501
self._password = None
self._full_name = None
self._email = None
self.discriminator = None
if password is not None:
self.password = password
if full_name is not None:
self.full_name = full_name
if email is not None:
self.email = email
@property
def password(self):
"""Gets the password of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:return: The password of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this BodyUpdateLoggedinUserApiV1UsersPut.
:param password: The password of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:type: str
"""
self._password = password
@property
def full_name(self):
"""Gets the full_name of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:return: The full_name of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this BodyUpdateLoggedinUserApiV1UsersPut.
:param full_name: The full_name of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:type: str
"""
self._full_name = full_name
@property
def email(self):
"""Gets the email of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:return: The email of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this BodyUpdateLoggedinUserApiV1UsersPut.
:param email: The email of this BodyUpdateLoggedinUserApiV1UsersPut. # noqa: E501
:type: str
"""
self._email = email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BodyUpdateLoggedinUserApiV1UsersPut, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BodyUpdateLoggedinUserApiV1UsersPut):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
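# A toy, standalone illustration (not part of the generated client) of the
# swagger_types-driven to_dict() pattern used above: attributes listed in
# swagger_types are walked, nested objects exposing to_dict() are serialised
# recursively, and plain values are copied through. The class and field names
# below are invented.
class _ToySwaggerModel(object):
    swagger_types = {'name': 'str', 'child': '_ToySwaggerModel'}
    def __init__(self, name, child=None):
        self.name = name
        self.child = child
    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
_expected = {'name': 'a', 'child': {'name': 'b', 'child': None}}
assert _ToySwaggerModel('a', _ToySwaggerModel('b')).to_dict() == _expected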
| 28.957317
| 119
| 0.592967
|
055c2360c29f569dcf051d340b61add1876285e1
| 4,651
|
py
|
Python
|
app/models.py
|
Waithera-m/partage
|
293905385839b7c36847a46c91e142cb2df2a3ae
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
Waithera-m/partage
|
293905385839b7c36847a46c91e142cb2df2a3ae
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
Waithera-m/partage
|
293905385839b7c36847a46c91e142cb2df2a3ae
|
[
"Unlicense"
] | null | null | null |
from . import db
from datetime import datetime
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
'''
function queries and returns user with a given id
'''
return User.query.get(int(user_id))
class User(db.Model,UserMixin):
'''
    class facilitates the creation of user objects
'''
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key=True)
email = db.Column(db.String(255),unique=True,index=True)
username = db.Column(db.String(255),index=True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
bio = db.Column(db.String(255))
profile_photo_path = db.Column(db.String())
password_hash = db.Column(db.String(255))
posts = db.relationship('Post',backref='user',lazy="dynamic")
blogs = db.relationship('Blog',backref='user',lazy="dynamic")
comments = db.relationship('Comments',backref='user',lazy='dynamic')
@property
def password(self):
'''
function blocks access to password property
'''
raise AttributeError('Password attribute cannot be read')
@password.setter
def password(self,password):
'''
function generates password hash
'''
self.password_hash = generate_password_hash(password)
def verify_password(self,password):
'''
function checks if entered and hashed passwords match
'''
return check_password_hash(self.password_hash,password)
def __repr__(self):
return f'User {self.username}'
class Role(db.Model):
'''
class facilitates the creation of role objects
'''
__tablename__='roles'
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(255))
users = db.relationship('User',backref='role',lazy="dynamic")
def __repr__(self):
        return f'Role {self.name}'
class Blog(db.Model):
'''
class facilitates the creation of blog objects
'''
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key=True)
title = db.Column(db.String(70))
created_at = db.Column(db.DateTime, default=datetime.utcnow)
tag_id = db.Column(db.Integer,db.ForeignKey('tags.id'))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
class Post(db.Model):
'''
class facilitates the creation of post objects
'''
__tablename__ = 'posts'
id = db.Column(db.Integer,primary_key=True)
title = db.Column(db.String(70))
content = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
tag_id = db.Column(db.Integer,db.ForeignKey('tags.id'))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
comments = db.relationship('Comments',backref='post',lazy='dynamic')
@classmethod
def get_post(cls,id):
'''
        function queries the database and returns the post with the given id
'''
post = cls.query.filter_by(id=id).first()
return post
class Tag(db.Model):
'''
class supports the creation of tag objects
'''
__tablename__ = 'tags'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(23))
blogs = db.relationship('Blog',backref = 'tag',lazy='dynamic')
posts = db.relationship('Post',backref = 'tag',lazy='dynamic')
def __repr__(self):
        return f'Tag {self.name}'
class Quote:
'''
class facilitates the creation of quote objects
'''
def __init__(self,id,author,quote):
'''
function facilitates the creation of quote properties
Args:
self.id: quote's id
self.author:quote's author
self.quote:quote content
'''
self.id = id
self.author = author
self.quote = quote
class Comments(db.Model):
'''
class facilitates the creation of comment objects
'''
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key=True)
comment = db.Column(db.String(1000))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
post_id = db.Column(db.Integer,db.ForeignKey("posts.id"))
def save_comment(self):
'''
function saves comments to the database
'''
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls,post_id):
'''
function retrieves post-specific comments
'''
comments = Comments.query.filter_by(post_id=post_id).all()
return comments
| 25.696133
| 72
| 0.639647
|
c38fddd53a0fa233ab4d850b571d1d01836a5a5d
| 2,559
|
py
|
Python
|
rivernet_prep/call_streamnet_downsample.py
|
BrisClimate/flood-cascade
|
660c29275a87785153d0f107ed23104fcbcbddee
|
[
"MIT"
] | null | null | null |
rivernet_prep/call_streamnet_downsample.py
|
BrisClimate/flood-cascade
|
660c29275a87785153d0f107ed23104fcbcbddee
|
[
"MIT"
] | null | null | null |
rivernet_prep/call_streamnet_downsample.py
|
BrisClimate/flood-cascade
|
660c29275a87785153d0f107ed23104fcbcbddee
|
[
"MIT"
] | 3
|
2020-11-08T16:01:47.000Z
|
2021-01-13T17:13:32.000Z
|
# Python script to process downsampled hydrography from 'downsample_hydro.py
# Calls streamnet function and creates 'rec'.csv (using jsosa/LFPtools), to describe the river network
#
# Requires:
# mpiexec
# streamnet from TauDEM (https://github.com/dtarb/TauDEM)
# split module from LFPtools (https://github.com/jsosa/LFPtools)
# Import python modules
import os,sys,subprocess,argparse
sys.path.append('/home/pu17449/gitsrc/LFPtools/lfptools') # Folder containing split.py
from split import connections
# Paths of executables to run TauDEM
TauDEM_bindir = '/home/pu17449/gitsrc/TauDEM/bin' # contains streamnet executable
mpiexec_bindir = '/usr/bin' # contains mpiexec executable
parser = argparse.ArgumentParser(description='Call TauDEM and LFPtools scripts to produce vectorised river network files corresponding to downsampled river network calculated by downsample_hydro.py')
parser.add_argument('-r','--res',default = '9s',help = 'Resolution of downsampled river network in arcseconds (e.g. "9s" or "15s")')
parser.add_argument('-d','--datadir',help = 'Directory containing river network data (input and output)')
args = parser.parse_args()
res = args.res
datadir = args.datadir
# resolution of downscaled hydrography
#res = '30s'
#datadir = '/home/pu17449/data2/lfp-tools/splitd8_v2/077'
# input files (produced by 'downsample_hydro.py')
fdir = os.path.join(datadir,'dir_d8_downsample_'+res+'.tif')
fnet = os.path.join(datadir,'net_downsample_'+res+'.tif')
fdem = os.path.join(datadir,'dem_downsample_'+res+'.tif')
facc = os.path.join(datadir,'acc_downsample_'+res+'.tif')
ford = os.path.join(datadir,'ord_downsample_'+res+'.tif')
# output files
ftree = os.path.join(datadir,'strn_tree_'+res+'d8.txt')
fcoord = os.path.join(datadir,'strn_coord_'+res+'d8.txt')
fnetwork = os.path.join(datadir,'strn_network_'+res+'d8.out')
fwatershed = os.path.join(datadir,'strn_watershed_'+res+'d8.tif')
frec = os.path.join(datadir,'rec_downsample_'+res+'.csv')
# Call streamnet
if not os.path.exists(fnetwork):
print('Calling streamnet')
cmd = ['mpiexec','-n','4','streamnet','-fel',fdem, '-p',fdir, '-ad8',facc ,'-src',fnet, '-tree',ftree, '-coord',fcoord, '-net',fnetwork, '-w',fwatershed]
print(cmd)
ret = subprocess.call(cmd,env={'PATH':TauDEM_bindir+':'+mpiexec_bindir})
else:
print('streamnet already exists, skipping')
ret = 0
if ret==0:
print('Creating rec file from tree and coords text files')
# Creating rec dataframe
rec = connections(ftree, fcoord)
# Writing XXX_rec.csv file
rec.to_csv(frec)
else:
print('streamnet failed: aborting')
| 41.95082
| 199
| 0.745604
|
a4b675268ac471c4a3ebf5138fd69725cd7f6186
| 1,591
|
py
|
Python
|
tests/test_tensorboard.py
|
TreeKid/stable-baselines
|
129c1958160b95962b887c312cd2273aed35df60
|
[
"MIT"
] | 19
|
2020-06-26T18:45:35.000Z
|
2022-03-08T14:20:32.000Z
|
tests/test_tensorboard.py
|
TreeKid/stable-baselines
|
129c1958160b95962b887c312cd2273aed35df60
|
[
"MIT"
] | 7
|
2020-11-13T17:48:40.000Z
|
2022-03-12T00:35:14.000Z
|
tests/test_tensorboard.py
|
TreeKid/stable-baselines
|
129c1958160b95962b887c312cd2273aed35df60
|
[
"MIT"
] | 7
|
2019-10-01T05:49:22.000Z
|
2021-12-24T07:11:55.000Z
|
import os
import shutil
import pytest
from stable_baselines import A2C, ACER, ACKTR, DQN, DDPG, PPO1, PPO2, SAC, TD3, TRPO
TENSORBOARD_DIR = '/tmp/tb_dir/'
if os.path.isdir(TENSORBOARD_DIR):
shutil.rmtree(TENSORBOARD_DIR)
MODEL_DICT = {
'a2c': (A2C, 'CartPole-v1'),
'acer': (ACER, 'CartPole-v1'),
'acktr': (ACKTR, 'CartPole-v1'),
'dqn': (DQN, 'CartPole-v1'),
'ddpg': (DDPG, 'Pendulum-v0'),
'ppo1': (PPO1, 'CartPole-v1'),
'ppo2': (PPO2, 'CartPole-v1'),
'sac': (SAC, 'Pendulum-v0'),
'td3': (TD3, 'Pendulum-v0'),
'trpo': (TRPO, 'CartPole-v1'),
}
N_STEPS = 1000
@pytest.mark.parametrize("model_name", MODEL_DICT.keys())
def test_tensorboard(model_name):
logname = model_name.upper()
algo, env_id = MODEL_DICT[model_name]
model = algo('MlpPolicy', env_id, verbose=1, tensorboard_log=TENSORBOARD_DIR)
model.learn(N_STEPS)
model.learn(N_STEPS, reset_num_timesteps=False)
assert os.path.isdir(TENSORBOARD_DIR + logname + "_1")
assert not os.path.isdir(TENSORBOARD_DIR + logname + "_2")
@pytest.mark.parametrize("model_name", MODEL_DICT.keys())
def test_multiple_runs(model_name):
logname = "tb_multiple_runs_" + model_name
algo, env_id = MODEL_DICT[model_name]
model = algo('MlpPolicy', env_id, verbose=1, tensorboard_log=TENSORBOARD_DIR)
model.learn(N_STEPS, tb_log_name=logname)
model.learn(N_STEPS, tb_log_name=logname)
assert os.path.isdir(TENSORBOARD_DIR + logname + "_1")
# Check that the log dir name increments correctly
assert os.path.isdir(TENSORBOARD_DIR + logname + "_2")
| 31.196078
| 84
| 0.688246
|
3f0757d8d95aff8b257f6a2291725fba81464eac
| 836
|
py
|
Python
|
MergeSortRecursive.py
|
sidhu177/pythonprog
|
a75285e9e4d3cd6f1257b9a79dc39e49c68a695d
|
[
"MIT"
] | 2
|
2019-05-01T04:32:07.000Z
|
2019-05-04T02:22:16.000Z
|
MergeSortRecursive.py
|
sidhu177/pythonprog
|
a75285e9e4d3cd6f1257b9a79dc39e49c68a695d
|
[
"MIT"
] | null | null | null |
MergeSortRecursive.py
|
sidhu177/pythonprog
|
a75285e9e4d3cd6f1257b9a79dc39e49c68a695d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
taken from Data Structures and Algorithms using Python, Lee and Hubbard, Springer
"""
def merge(seq,start,mid,stop):
lst = []
i = start
j = mid
    while i < mid and j < stop:
        if seq[i] < seq[j]:
            lst.append(seq[i])
            i += 1
        else:
            lst.append(seq[j])
            j += 1
    while i < mid:
        # copy any leftover elements from the left half
        lst.append(seq[i])
        i += 1
    # leftover elements from the right half are already in their final
    # positions at the end of seq[start:stop], so only lst is copied back
    for i in range(len(lst)):
        seq[start + i] = lst[i]
def mergeSortRecursively(seq,start,stop):
if start >= stop-1:
return
mid = (start+stop)//2
mergeSortRecursively(seq,start,mid)
mergeSortRecursively(seq,mid,stop)
merge(seq,start,mid,stop)
def mergeSort(seq):
mergeSortRecursively(seq,0,len(seq))
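# Hedged usage example (list values chosen arbitrarily): mergeSort sorts in place.
if __name__ == '__main__':
    data = [5, 2, 9, 1, 5, 6]
    mergeSort(data)
    assert data == [1, 2, 5, 5, 6, 9]
    print(data)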
| 22.594595
| 82
| 0.503589
|
f49ead240d62e2d34df1e26b24971939c8c51227
| 100
|
py
|
Python
|
pan_cortex_data_lake/adapters/__init__.py
|
Faqa/pan-cortex-data-lake-python
|
cfa0cfca48e7c659e173ded9687b48ff08ef1d69
|
[
"ISC"
] | 27
|
2018-03-19T08:12:47.000Z
|
2019-12-10T04:15:00.000Z
|
pan_cortex_data_lake/adapters/__init__.py
|
Faqa/pan-cortex-data-lake-python
|
cfa0cfca48e7c659e173ded9687b48ff08ef1d69
|
[
"ISC"
] | 55
|
2018-03-16T19:53:04.000Z
|
2019-04-03T16:34:26.000Z
|
pan_cortex_data_lake/adapters/__init__.py
|
Faqa/pan-cortex-data-lake-python
|
cfa0cfca48e7c659e173ded9687b48ff08ef1d69
|
[
"ISC"
] | 9
|
2018-03-21T18:03:42.000Z
|
2019-10-06T03:56:58.000Z
|
# -*- coding: utf-8 -*-
"""Adapters package."""
from .adapter import StorageAdapter # noqa: F401
| 16.666667
| 49
| 0.64
|
f84aab692d42b9386991062eaaa4f123fa7b95c1
| 12,549
|
py
|
Python
|
main.py
|
Trxppy/ecosystem-simulator
|
df7e953813c42978ec2588dec363d4b4000817a7
|
[
"MIT"
] | null | null | null |
main.py
|
Trxppy/ecosystem-simulator
|
df7e953813c42978ec2588dec363d4b4000817a7
|
[
"MIT"
] | null | null | null |
main.py
|
Trxppy/ecosystem-simulator
|
df7e953813c42978ec2588dec363d4b4000817a7
|
[
"MIT"
] | null | null | null |
# import modules
from classes.environment import *
from classes.plant import *
import json  # json.loads/json.dumps are used by import_organism() and show_rename_menu()
# startup file
version = "0.2.0"
print("-- Ecosystem Simulator")
print("--- Developed by Matthew Grant")
print("---- Version " + str(version) + "\n")
# setup stages vars
program_active = True
environment_setup_active = True
organism_setup_active = True
menu_active = False
# import organism data from file
def import_organism(data):
# parse data
organism = data.split(" ")
organism_name = organism[0]
organism_count = organism[1]
# check plants
with open('user/plants.txt', "r") as f:
for line in f:
this_organism = json.loads(line)
name = this_organism["species"]
if(name.lower() == organism_name.lower()):
return ["plant", line]
# check animals
with open('user/animals.txt', "r") as f:
for line in f:
this_organism = json.loads(line)
name = this_organism["species"]
if(name.lower() == organism_name.lower()):
return ["animal", line]
return False
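# Hypothetical sketch of the record format import_organism() expects in
# user/plants.txt and user/animals.txt: one JSON object per line with at least a
# "species" field ("parent" is read later by show_rename_menu; "water_needs" is
# an invented placeholder field).
def _example_plant_record():
    sample_line = '{"species": "pine", "parent": "conifer", "water_needs": 3}'
    return json.loads(sample_line)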
def show_rename_menu():
# handle organism rename
print("\nPlease enter the name of the organism and the new desired name (ex: pine1 pine2)")
data = input()
# parse data
organism = data.split(" ")
organism_current_name = organism[0]
organism_new_name = organism[1]
# scan plant file
    organism_exists = False
    organisms = {}
    with open('user/plants.txt', "r") as f:
        # check the plant save file for duplicate names and collect every entry
        for line in f:
            data = json.loads(line)
            if(data["species"] == organism_new_name):
                print("Organism already exists. Please re-enter your statement with a different name:")
                return
            if(data["species"] == organism_current_name):
                organism_exists = True
            # save every entry to a temporary dict (organisms) so the file can be rewritten below
            organisms[data["species"]] = data
    if(organism_exists == False):
        # return early if the organism was never found in the file
        print("ORGANISM NOT FOUND. PLEASE TRY AGAIN")
        return
# rewrite plants file
if(organism_exists):
# update plant data for selected species
organisms[organism_current_name]["species"] = organism_new_name # update species name
with open('user/plants.txt', "w+") as f:
for index in organisms:
if(organisms[index]["parent"] == organism_current_name):
# update parent species name if it matches the selected species
organisms[index]["parent"] = organism_new_name
f.write(json.dumps(organisms[index]) + "\n")
while(program_active):
print("ENVIRONMENT SETUP ACTIVE")
# constant variables
env_size_min = 4
env_size_max = 25
simulation_runtime_max = 5000
# initialize start conditions (environment setup)
while(environment_setup_active):
inputValid = False
while(inputValid == False):
# validate environment size
print("\nPlease enter the size of the environment:")
env_size = input()
if(env_size.isnumeric() == False):
# check if number is an int
print("Sorry! Environment size must be an integer.")
else:
# more detailed checks
env_size = int(env_size)
if(env_size < env_size_min):
print("Sorry! Environment size must be at least " + str(env_size_min) + " ("+ str(env_size_min) + "x" + str(env_size_min) + ")")
elif(env_size > env_size_max):
print("Sorry! Environment size cannot exceed " + str(env_size_max) + " ("+ str(env_size_max) + "x" + str(env_size_max) + ")")
else:
print("Enter 'Y' to confirm an environment size of " + str(env_size) + " ("+ str(env_size) + "x" + str(env_size) + ")")
confirm_input = input()
if(confirm_input.lower() == 'y'):
inputValid = True
inputValid = False
while(inputValid == False):
# validate environment water percentage
print("\nPlease enter the percentage of surface water in the environment (1-100):")
env_water_percentage = input()
if(env_water_percentage.isnumeric() == False):
# check if number is an int
print("Sorry! Environment water percentage must be an integer.")
else:
# more detailed checks
env_water_percentage = int(env_water_percentage)
if(env_water_percentage < 1):
print("Sorry! Environment water percentage must be at least 1%")
elif(env_water_percentage > 100):
print("Sorry! Environment water percentage cannot exceed 100%")
else:
print("Enter 'Y' to confirm an environment water percentage of " + str(env_water_percentage) + "%")
confirm_input = input()
if(confirm_input.lower() == 'y'):
inputValid = True
inputValid = False
while(inputValid == False):
# validate environment water distribution
print("\nPlease enter the environment's water distribution (1-100):")
env_water_distribution = input()
if(env_water_distribution.isnumeric() == False):
# check if number is an int
print("Sorry! Environment water distribution must be an integer.")
else:
# more detailed checks
env_water_distribution = int(env_water_distribution)
if(env_water_distribution < 1):
print("Sorry! Environment water distribution must be at least 1%")
elif(env_water_distribution > 100):
print("Sorry! Environment water distribution cannot exceed 100%")
else:
print("Enter 'Y' to confirm an environment water distribution of " + str(env_water_distribution) + "%")
confirm_input = input()
if(confirm_input.lower() == 'y'):
inputValid = True
inputValid = False
while(inputValid == False):
# validate environment rainfall frequency
print("\nPlease enter the environment's rainfall frequency (1-100):")
env_rainfall_frequency = input()
if(env_rainfall_frequency.isnumeric() == False):
# check if number is an int
print("Sorry! Environment rainfall frequency must be an integer.")
else:
# more detailed checks
env_rainfall_frequency = int(env_rainfall_frequency)
if(env_rainfall_frequency < 1):
print("Sorry! Environment rainfall frequency must be at least 1%")
elif(env_rainfall_frequency > 100):
print("Sorry! Environment rainfall frequency cannot exceed 100%")
else:
print("Enter 'Y' to confirm an environment rainfall frequency of " + str(env_rainfall_frequency) + "%")
confirm_input = input()
if(confirm_input.lower() == 'y'):
inputValid = True
# initialize environment object
env = Environment(env_size, env_water_percentage, env_water_distribution, env_rainfall_frequency)
inputValid = False
while(inputValid == False):
# validate simulation length
print("\nPlease enter the length of the simulation:")
simulation_runtime = input()
if(simulation_runtime.isnumeric() == False):
# check if number is an int
print("Sorry! Simulation length must be an integer.")
else:
# more detailed checks
simulation_runtime = int(simulation_runtime)
if(simulation_runtime > simulation_runtime_max):
print("Sorry! Simulation length cannot exceed {}".format(simulation_runtime_max))
else:
print("Enter 'Y' to confirm a simulation length of {}".format(simulation_runtime))
confirm_input = input()
if(confirm_input.lower() == 'y'):
inputValid = True
environment_setup_active = False
# import organisms into the new environment (organism setup)
while(organism_setup_active):
plants = []
animals = []
print("\nIMPORT ORGANISMS")
print("Please enter the name of the organism and its spawn count (ex: pine 4)")
print("Enter 'DONE' to continue")
looping = True # for input validation
while(looping): # input loop (user can continue to make entries until they opt out of the loop via "done" command)
organism_found = False
while(organism_found == False): # input validation
data = input()
if(data.lower() == 'done'):
looping = False
organism_setup_active = False
organism_found = True
else:
print("Enter 'Y' to confirm this import statement")
confirm_input = input()
if(confirm_input.lower() == 'y'):
organism_name = data.split(" ")[0]
organism_count = data.split(" ")[1]
if(import_organism(data) != False):
organism_type = import_organism(data)[0]
organism = import_organism(data)[1]
organism_found = True
if(organism_type == "animal"):
animals.append([organism, organism_count])
else:
plants.append([organism, organism_count])
else:
print("Organism not found. Please re-enter the import statement:")
else:
print("Please re-enter the import statement:")
# handle simulation
print("\n\nSIMULATION SETUP COMPLETE")
menu_active = True
simulation_run = False
while(menu_active):
print("Please enter a command:")
command = input()
if(command == "run"):
# run simulation
env.simulate(simulation_runtime, plants, animals)
simulation_run = True
print("\n")
elif(command == "restart"):
# restart simulation
menu_active = False
environment_setup_active = True
organism_setup_active = True
print("\n")
elif(command == "repopulate"):
# repopulate simulation
menu_active = False
organism_setup_active = True
elif(command == "rename"):
# rename organisms
show_rename_menu()
elif(command == "merge"):
# save and merge simulation data
if(simulation_run):
env.merge(simulation_runtime)
print("\n")
else:
print("Please run simulation first!\n")
elif(command == "end"):
# end simulation
menu_active = False # exit
program_active = False
elif(command == "help"):
# view commands options
print("\nCOMMAND LIST:")
print("run -> run simulation with given setup parameters")
print("restart -> restart simulation with given setup parameters")
print("repopulate -> re-enter starting organism parameters")
print("rename -> rename all references of an organism")
print("merge -> merge the organism data from previously run simulation into the global data")
print("end -> exit simulation")
print("help -> view list of commands")
print("--------------------------------\n")
else:
# if invalid input detected
print("INVALID COMMAND '{}'\n".format(command))
| 44.031579
| 148
| 0.553431
|
8eddf413e1fea5d428259911adbba6e19c4a641b
| 11,500
|
py
|
Python
|
wpa_pyfi/network.py
|
garretthagen21/rpi3-wifi
|
540f4e7bfe4ffa4c4e02d3e6cfea80a4438de2ca
|
[
"BSD-2-Clause"
] | null | null | null |
wpa_pyfi/network.py
|
garretthagen21/rpi3-wifi
|
540f4e7bfe4ffa4c4e02d3e6cfea80a4438de2ca
|
[
"BSD-2-Clause"
] | null | null | null |
wpa_pyfi/network.py
|
garretthagen21/rpi3-wifi
|
540f4e7bfe4ffa4c4e02d3e6cfea80a4438de2ca
|
[
"BSD-2-Clause"
] | 1
|
2021-12-12T08:20:24.000Z
|
2021-12-12T08:20:24.000Z
|
#!/usr/bin/python3
#
# @file network.py
#
# @brief This class handles network configurations that are loaded to and from wpa_supplicant.conf
#
# @author Garrett Hagen <garretthagen21@gmail.com>
#
# @date 2021-01-03
#
import subprocess
from ifconfigparser import IfconfigParser
from operator import attrgetter
class Network(object):
"""Represents a single network block in wpa_supplicant.conf."""
WPA_SUPPLICANT_CONFIG = "/etc/wpa_supplicant/wpa_supplicant.conf"
DEFAULT_INTERFACE = 'wlan0'
@classmethod
def for_file(cls, wpa_supplicant_config):
"""
        A class factory providing a convenient way to specify the wpa_supplicant
        config file that you want to use. Use this instead of directly overwriting
        the WPA_SUPPLICANT_CONFIG class attribute if you care about thread safety.
"""
return type(cls)(cls.__name__, (cls,), {
'WPA_SUPPLICANT_CONFIG': wpa_supplicant_config,
})
def __init__(self, ssid, **opts):
self.ssid = ssid
self.opts = opts
self.interface = Network.DEFAULT_INTERFACE
def __repr__(self):
string = 'network={\n'
string += '\tssid="{}"\n'.format(self.ssid)
for opt, val in self.opts.items():
string += '\t{}={}\n'.format(opt, val)
string += '}'
return string
@property
def nickname(self):
return self.opts.get("id_str")
@property
def priority(self):
priority = self.opts.get("priority")
if priority is None:
priority = 0
return int(priority)
def set_interface(self, interface):
self.interface = interface
def save(self, overwrite=True, supplicant_file=None):
u"""Write to the appropriate config file.
        Will refuse to overwrite an entry that is already present for the same
        network unless explicitly told to do so via the 'overwrite' parameter.
        Arguments:
        overwrite -- replace an existing entry for this network. Defaults to True.
supplicant_file -- the file in which the config will be written.
Defaults to WPA_SUPPLICANT_CONFIG (global setting).
"""
# Set to default config if unspecified
if not supplicant_file:
supplicant_file = self.WPA_SUPPLICANT_CONFIG
# Handle any existing networks
existing_network = self.find(self.ssid)
if existing_network:
if not overwrite:
print("Network: " + str(
self) + " already exists in " + supplicant_file + " and overwrite is False. Ignoring save")
return
else:
existing_network.delete()
# Save the file
with open(supplicant_file, 'a') as wpa_config:
wpa_config.write('\n')
wpa_config.write(str(self))
wpa_config.write('\n')
def delete(self, supplicant_file=None, disconnect_immediately=True):
"""
        Deletes this network's block from the wpa_supplicant config file.
"""
# Set to default config if unspecified
if not supplicant_file:
supplicant_file = self.WPA_SUPPLICANT_CONFIG
# Read in the contents of supplicant conf
file_in = open(supplicant_file, 'r')
all_lines = file_in.readlines()
file_in.close()
curr_index = 0
entry_start_index = None
entry_end_index = None
entry_found = False
# Iterate through the files to see what we will keep
for line in all_lines:
line = line.strip()
# If line exists and is not whitespace or comment we will analyze it
if line and not line.startswith("#"):
# We are at the beginning of an entry block and have not found our entry yet
if "network" and "{" in line and not entry_found:
entry_start_index = curr_index
# The current line contains the ssid or nick name we are looking for and we are in a block
elif ("ssid" and self.ssid in line) or \
(self.nickname and ("id_str" and self.nickname in line)) \
and entry_start_index:
entry_found = True
# We have reached end bracket, have found our entry, and have not yet set the end block
elif "}" in line and entry_found and not entry_end_index:
entry_end_index = curr_index
curr_index += 1
        # If we have valid indices and an entry has been found, remove everything between the two indices
        if entry_found and entry_start_index is not None and entry_end_index is not None:
# End index is calculated inclusively, but sliced exclusively so add 1
entry_end_index = min(entry_end_index + 1, len(all_lines))
# Remove the entry from all lines
all_lines = all_lines[:entry_start_index] + all_lines[entry_end_index:]
# Piece the list back together
wpa_supplicant_temp_file = supplicant_file + ".tmp"
file_output = ''.join(all_lines)
temp_file = open(wpa_supplicant_temp_file, 'w')
temp_file.write(file_output)
temp_file.close()
# Overwrite the actual file with the temp file
subprocess.check_output(['mv', wpa_supplicant_temp_file, supplicant_file])
# Reload wpa client in case we deleted the network we were connected to
if disconnect_immediately:
self._reload_wpa_client(reconfigure_priority=False)
return True
else:
print("Could Not Find Entry for Network: " + self.ssid + " in " + supplicant_file)
return False
def add_option(self, option_key, option_value):
self.opts[option_key] = option_value
def activate(self):
"""
Connects to the network as configured in this scheme.
"""
# Adjust our priority to be the highest
self.add_option('priority', max(self.all(), key=attrgetter('priority')).priority + 1)
# Update supplicant file with our new priority
self.save(overwrite=True)
output = self._reload_wpa_client()
if 'OK' not in output:
raise ConnectionError("An error occured during wpa_cli reconfigure %r\n\nwpa_cli Output:" + output % self)
def get_connection_data(self):
ifconfig_output = str(subprocess.check_output(['ifconfig']))
try:
ifconfig_parse = IfconfigParser(console_output=ifconfig_output)
return ifconfig_parse.get_interface(self.interface)
except Exception as e:
print("An error occured looking for interface: " + self.interface)
print("Stack trace: " + str(e))
return None
def _reload_wpa_client(self, reconfigure_priority=True):
# Normalize priorities in the supplicant file
if reconfigure_priority:
self.reconfigure_priority()
# Restart connection management
subprocess.check_output(['ifconfig', self.interface, 'up'])
wpa_cli_output = subprocess.check_output(['wpa_cli', '-i', self.interface, 'reconfigure'],
stderr=subprocess.STDOUT).decode('utf-8')
return wpa_cli_output
@classmethod
def reconfigure_priority(cls):
"""Re adjust the priorities in the supplicant file so they are continously ranked"""
# Reorder the existing networks
all_networks = sorted(cls.all(), key=attrgetter('priority'))
network_num = 0
for network in all_networks:
old_priority = network.priority
# print("Network: "+network.ssid+" Priority: ("+str(old_priority)+" -> "+str(network_num)+")")
# Update the networks priority
network.add_option("priority", network_num)
network.save()
# Only increment priority for non-ambiguous networks
if old_priority > 0:
network_num += 1
@classmethod
def from_string(cls, string):
"""Create a new network object by parsing a string."""
lines = string.split("\n")
# throw away the first and last lines, and remove indentation
lines = [line.strip() for line in lines[1:-1]]
opts = {}
for line in lines:
split = line.split("=")
opt = split[0]
if len(split) == 2:
value = split[1]
else:
value = "=".join(split[1:])
opts[opt] = value
# remove the SSID from the other options and strip the quotes from it.
ssid = opts["ssid"][1:-1]
del opts["ssid"]
return cls(ssid, **opts)
@classmethod
def find(cls, ssid, name=None, supplicant_file=None):
# Set to default config if unspecified
if not supplicant_file:
supplicant_file = cls.WPA_SUPPLICANT_CONFIG
all_networks = cls.all(supplicant_file=supplicant_file)
# First try ssid
for network in all_networks:
if network.ssid == ssid:
return network
# If unsuccessful try name
for network in all_networks:
if network.nickname and name and network.nickname == name:
return network
return None
@classmethod
def all(cls, supplicant_file=None):
"""Extract all network blocks from a file.
Returns a list of Network objects.
"""
# Set to default config if unspecified
if not supplicant_file:
supplicant_file = cls.WPA_SUPPLICANT_CONFIG
with open(supplicant_file) as netfile:
config = netfile.read()
networks = []
in_block = False
netblock = []
for line in config.split("\n"):
line = line.strip()
if line.startswith("#"):
continue
# this is really crappy "parsing" but it should work
if line == "network={" and not in_block:
in_block = True
if in_block:
netblock.append(line)
if line == "}" and in_block:
in_block = False
nb = "\n".join(netblock)
networks.append(cls.from_string(nb))
netblock = []
return networks
    # TODO: Priority should be configured so the new network can be set as highest priority among existing networks
@classmethod
def new_network(cls, ssid, passkey="", is_open=False, id_str=None, interface=DEFAULT_INTERFACE):
network = cls(ssid)
key_mgmt_type = "NONE"
if not is_open:
# check passphrase length
key_mgmt_type = "WPA-PSK"
pass_len = len(passkey)
if pass_len < 8 or pass_len > 63:
print("Passphrase must be 8..63 characters.")
network.opts["psk"] = '"{}"'.format(passkey)
# Add option params
network.set_interface(interface)
network.add_option("key_mgmt", key_mgmt_type)
network.add_option("priority", 0)
if id_str:
network.add_option("id_str", '"{}"'.format(id_str))
return network
@classmethod
def for_cell(cls, cell, passkey="", id_str="", interface=DEFAULT_INTERFACE):
return cls.new_network(cell.ssid, passkey=passkey, is_open=(not cell.encrypted), id_str=id_str,
interface=interface)
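# A small hedged demonstration of the network-block format that all() scans for and
# from_string() parses; the parser below mirrors the logic above without importing
# this module, and the SSID/psk values are made up.
def _parse_network_block_sketch(block):
    opts = {}
    for line in [l.strip() for l in block.strip().split("\n")[1:-1]]:
        key, _, value = line.partition("=")
        opts[key] = value
    ssid = opts.pop("ssid")[1:-1]
    return ssid, opts
_SSID, _OPTS = _parse_network_block_sketch(
    'network={\n\tssid="HomeWifi"\n\tpsk="hunter22"\n\tkey_mgmt=WPA-PSK\n\tpriority=0\n}'
)
assert _SSID == "HomeWifi" and _OPTS["key_mgmt"] == "WPA-PSK"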
| 35.9375
| 118
| 0.600348
|
8c7a7370f81a5b81d08c87136688765dce556ada
| 4,554
|
py
|
Python
|
research/qa_kg/model_n2nmn/modules.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 3,326
|
2018-01-26T22:42:25.000Z
|
2022-02-16T13:16:39.000Z
|
research/qa_kg/model_n2nmn/modules.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
research/qa_kg/model_n2nmn/modules.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 1,474
|
2018-02-01T04:33:18.000Z
|
2022-03-08T07:02:20.000Z
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
class Modules:
def __init__(self, config, kb, word_vecs, num_choices, embedding_mat):
self.config = config
self.embedding_mat = embedding_mat
# kb has shape [N_kb, 3]
self.kb = kb
self.embed_keys_e, self.embed_keys_r, self.embed_vals_e = self.embed_kb()
# word_vecs has shape [T_decoder, N, D_txt]
self.word_vecs = word_vecs
self.num_choices = num_choices
def embed_kb(self):
keys_e, keys_r, vals_e = [], [], []
for idx_sub, idx_rel, idx_obj in self.kb:
keys_e.append(idx_sub)
keys_r.append(idx_rel)
vals_e.append(idx_obj)
embed_keys_e = tf.nn.embedding_lookup(self.embedding_mat, keys_e)
embed_keys_r = tf.nn.embedding_lookup(self.embedding_mat, keys_r)
embed_vals_e = tf.nn.embedding_lookup(self.embedding_mat, vals_e)
return embed_keys_e, embed_keys_r, embed_vals_e
def _slice_word_vecs(self, time_idx, batch_idx):
# this callable will be wrapped into a td.Function
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
# time is highest dim in word_vecs
joint_index = tf.stack([time_idx, batch_idx], axis=1)
return tf.gather_nd(self.word_vecs, joint_index)
# All the layers are wrapped with td.ScopedLayer
def KeyFindModule(self,
time_idx,
batch_idx,
scope='KeyFindModule',
reuse=None):
# In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: embed_keys_e x text_param -> att
# Input:
# embed_keys_e: [N_kb, D_txt]
# text_param: [N, D_txt]
# Output:
# att: [N, N_kb]
#
# Implementation:
# 1. Elementwise multiplication between embed_key_e and text_param
# 2. L2-normalization
with tf.variable_scope(scope, reuse=reuse):
m = tf.matmul(text_param, self.embed_keys_e, transpose_b=True)
att = tf.nn.l2_normalize(m, dim=1)
return att
def KeyFilterModule(self,
input_0,
time_idx,
batch_idx,
scope='KeyFilterModule',
reuse=None):
att_0 = input_0
text_param = self._slice_word_vecs(time_idx, batch_idx)
# Mapping: and(embed_keys_r x text_param, att) -> att
# Input:
# embed_keys_r: [N_kb, D_txt]
# text_param: [N, D_txt]
# att_0: [N, N_kb]
# Output:
# att: [N, N_kb]
#
# Implementation:
# 1. Elementwise multiplication between embed_key_r and text_param
# 2. L2-normalization
# 3. Take the elementwise-min
with tf.variable_scope(scope, reuse=reuse):
m = tf.matmul(text_param, self.embed_keys_r, transpose_b=True)
att_1 = tf.nn.l2_normalize(m, dim=1)
att = tf.minimum(att_0, att_1)
return att
def ValDescribeModule(self,
input_0,
time_idx,
batch_idx,
scope='ValDescribeModule',
reuse=None):
att = input_0
# Mapping: att -> answer probs
# Input:
# embed_vals_e: [N_kb, D_txt]
# att: [N, N_kb]
# embedding_mat: [self.num_choices, D_txt]
# Output:
# answer_scores: [N, self.num_choices]
#
# Implementation:
# 1. Attention-weighted sum over values
# 2. Compute cosine similarity scores between the weighted sum and
# each candidate answer
with tf.variable_scope(scope, reuse=reuse):
# weighted_sum has shape [N, D_txt]
weighted_sum = tf.matmul(att, self.embed_vals_e)
# scores has shape [N, self.num_choices]
scores = tf.matmul(
weighted_sum,
tf.nn.l2_normalize(self.embedding_mat, dim=1),
transpose_b=True)
return scores
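# A hedged NumPy sketch (all shapes and values invented, TensorFlow not imported) of
# the attention flow the three modules above implement: KeyFindModule scores KB keys
# against a text vector, KeyFilterModule intersects two attentions with an elementwise
# minimum, and ValDescribeModule projects the attention-weighted KB values onto the
# answer embeddings.
import numpy as np
def _l2_normalize_rows(m):
    return m / np.maximum(np.linalg.norm(m, axis=1, keepdims=True), 1e-12)
_N, _N_kb, _D_txt, _N_choices = 2, 5, 8, 4
_rng = np.random.default_rng(0)
_embed_keys_e = _rng.normal(size=(_N_kb, _D_txt))
_embed_keys_r = _rng.normal(size=(_N_kb, _D_txt))
_embed_vals_e = _rng.normal(size=(_N_kb, _D_txt))
_embedding_mat = _rng.normal(size=(_N_choices, _D_txt))
_text_param = _rng.normal(size=(_N, _D_txt))
_att_0 = _l2_normalize_rows(_text_param @ _embed_keys_e.T)                   # KeyFindModule
_att = np.minimum(_att_0, _l2_normalize_rows(_text_param @ _embed_keys_r.T)) # KeyFilterModule
_scores = (_att @ _embed_vals_e) @ _l2_normalize_rows(_embedding_mat).T      # ValDescribeModule
assert _scores.shape == (_N, _N_choices)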
| 34.5
| 80
| 0.628898
|
337dc273a3a96c1b2acff01c11087521ce1cb7b5
| 569
|
py
|
Python
|
spacy_lookups_data/tests/test_hr.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
spacy_lookups_data/tests/test_hr.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
spacy_lookups_data/tests/test_hr.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from spacy.lang.hr import Croatian
import pytest
@pytest.fixture(scope="session")
def hr_nlp():
return Croatian()
@pytest.mark.parametrize(
"string,lemma",
[
("trčao", "trčati"),
("adekvatnim", "adekvatan"),
("dekontaminacijama", "dekontaminacija"),
("filologovih", "filologov"),
("je", "biti"),
("se", "sebe"),
],
)
def test_hr_lemmatizer_lookup_assigns(hr_nlp, string, lemma):
tokens = hr_nlp(string)
assert tokens[0].lemma_ == lemma
| 21.074074
| 61
| 0.625659
|
7ec62be260c612073877078c19e391d54210c429
| 1,729
|
py
|
Python
|
setup.py
|
boogieLing/r0redis
|
15508c5cf6f86dab001af5cb70bcd5f3bf3777fe
|
[
"MIT"
] | null | null | null |
setup.py
|
boogieLing/r0redis
|
15508c5cf6f86dab001af5cb70bcd5f3bf3777fe
|
[
"MIT"
] | null | null | null |
setup.py
|
boogieLing/r0redis
|
15508c5cf6f86dab001af5cb70bcd5f3bf3777fe
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open('README.rst', 'r') as fp:
long_description = fp.read()
setup(
name='r0redis',
version='0.4.5',
description='Easily store, index, and modify Python dicts in Redis (with flexible searching)',
long_description=long_description,
author='r0',
author_email='boogieLing_o@163.com',
license='MIT',
url='https://github.com/boogieLing/r0-redis-helper',
download_url='https://github.com/boogieLing/r0-redis-helper/tarball/v0.4.1',
packages=find_packages(),
setup_requires=['pytest-runner'],
tests_require=['pytest'],
install_requires=[
'bg-helper',
'click>=6.0',
'dt-helper',
'fs-helper',
'hiredis==1.1.0',
'input-helper',
'pytz',
'redis==3.5.3',
'settings-helper',
'ujson==4.0.1',
],
include_package_data=True,
package_dir={'': '.'},
package_data={
'': ['*.ini'],
},
entry_points={
'console_scripts': [
'rh-download-examples=r0redis.scripts.download_examples:main',
'rh-download-scripts=r0redis.scripts.download_scripts:main',
'rh-notes=r0redis.scripts.notes:main',
'rh-shell=r0redis.scripts.shell:main',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Intended Audience :: Developers',
],
keywords=['redis', 'dictionary', 'secondary index', 'model', 'log', 'prototype', 'helper']
)
| 31.436364
| 98
| 0.591093
|
9d8163afe575fae6f06b87a5b0d20f6fe78aa35d
| 8,934
|
py
|
Python
|
benchmarks/exps/old/card233.py
|
SymbioticLab/Salus
|
b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f
|
[
"Apache-2.0"
] | 104
|
2019-02-12T20:41:07.000Z
|
2022-03-07T16:58:47.000Z
|
benchmarks/exps/old/card233.py
|
SymbioticLab/Salus
|
b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f
|
[
"Apache-2.0"
] | 9
|
2019-08-24T03:23:21.000Z
|
2021-06-06T17:59:07.000Z
|
benchmarks/exps/old/card233.py
|
SymbioticLab/Salus
|
b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f
|
[
"Apache-2.0"
] | 18
|
2019-03-04T07:45:41.000Z
|
2021-09-15T22:13:07.000Z
|
# -*- coding: future_fstrings -*-
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NSDI 19 Experiment
Job fairness, use one lane only
Scheduler: fairness
Work conservation: True
Collected data: per iteration speed
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import csv
import logging
import time
import shutil
from datetime import datetime
from itertools import chain
from timeit import default_timer
from typing import Sequence, Union
from absl import flags
from benchmarks.driver.server import SalusServer
from benchmarks.driver.server.config import presets
from benchmarks.driver.utils import try_with_default, atomic_directory, kill_tree, prompt
from benchmarks.driver.utils.compatiblity import pathlib
from benchmarks.driver.workload import WTL, Executor, RunConfig, Workload
from benchmarks.exps import run_seq, Pause, maybe_forced_preset
FLAGS = flags.FLAGS
TBatchSize = Union[str, int]
logger = logging.getLogger(__name__)
flags.DEFINE_boolean('use_salus', True, 'Run on Salus or TF')
flags.DEFINE_integer('concurrent', 11, 'Concurrent workload allowed on Salus')
flags.DEFINE_boolean('fifo', False, 'Use FIFO for TF')
flags.DEFINE_float('overcommit', 1, 'Factor of amount of total memory in TF for over commit')
flags.DEFINE_float('phymem', 14 * (1024 ** 3), 'Amount of physical memory in GPU, in bytes')
flags.DEFINE_integer('scale_down', 1, 'Scale down iterations')
flags.DEFINE_string('sched', 'fair', "Salus iter scheduler")
def load_trace(path, ex):
path = pathlib.Path(path)
with path.open() as f:
reader = csv.DictReader(f)
def create_from_row(row):
name, bs = row['model_name'].split('_')
bs = try_with_default(int, bs, ValueError)(bs)
bn = int(row['iterations'])
submit_time = int(row['submit_time'])
if FLAGS.scale_down > 1:
bn = bn // FLAGS.scale_down
submit_time = submit_time / FLAGS.scale_down
w = WTL.create(name, bs, bn, ex)
w.env['SALUS_TOTAL_TIME'] = row['duration'] # seconds
return w, submit_time, row
return [create_from_row(row) for row in reader]
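# Hypothetical sketch of the trace CSV that load_trace() consumes; the column names
# (job_id, model_name, iterations, submit_time, duration) come from the code above
# and from main(), while the row values below are invented for illustration.
def _example_trace_rows():
    import io
    sample = ("job_id,model_name,iterations,submit_time,duration\n"
              "0,alexnet_25,1000,0,300\n")
    return list(csv.DictReader(io.StringIO(sample)))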
def find_geometry(w, field):
"""
:type w: Workload
:type field: str
"""
if w.geometry[field] is not None:
return w.geometry[field]
# check for another bn
for bn in w.wtl.available_batch_nums(w.batch_size):
g = WTL.from_name(w.name).geometry(RunConfig(w.batch_size, bn, None), w.executor)
if g[field] is not None:
w.geometry[field] = g[field]
return g[field]
return None
def main(argv):
# type: (Sequence[str]) -> None
scfg = maybe_forced_preset(presets.OpTracing)
scfg.scheduler = FLAGS.sched
scfg.extra_args = ['-v1', '--vmodule', 'tf_executor*=0']
ex = Executor.Salus if FLAGS.use_salus else Executor.TF
if FLAGS.fifo:
logdir = FLAGS.save_dir / 'fifo'
else:
logdir = FLAGS.save_dir / ex.value / scfg.scheduler
# create workload instances
workloads = load_trace(argv[0], ex)
# Check and update if workloads have the info we need
if ex == Executor.TF and not FLAGS.fifo:
for w, _, _ in workloads:
for field in ['peakmem']:
if find_geometry(w, field) is None:
raise ValueError(f'Missing {field} data for workload {w.canonical_name} of {w.batch_num} iters'
f', available geometries: {w.wtl._geometries}')
# enable overcommit
if FLAGS.overcommit > 1:
for w, _, _ in workloads:
w.env['TF_GPU_ALLOCATOR'] = 'cuda_managed'
def accept_workload(w, alive):
if FLAGS.fifo:
return len(alive) == 0
elif FLAGS.use_salus:
return len(alive) < FLAGS.concurrent
else:
currmem = sum(wl.geometry.peakmem for wl in alive)
return w.geometry.peakmem + currmem < FLAGS.overcommit * FLAGS.phymem
try:
try:
with atomic_directory(logdir) as tmp:
# copy trace file
shutil.copy2(argv[0], str(tmp/'trace.csv'))
with (tmp / 'card234.output').open('w') as f:
started = []
pending = []
alive = []
def workload_done(proc):
w = proc.workload
logger.info(f'Finished workload {w.output_name}.{w.batch_num}iter.{w.job_id}')
print(f'{datetime.now()}: Finished workload '
f'{w.output_name}.{w.batch_num}iter.{w.job_id}',
file=f)
def do_stuff(rel_time):
if workloads:
w, submit_time, row = workloads[0]
if rel_time >= submit_time:
workloads.pop(0)
w.job_id = row["job_id"]
logger.info(f'Queued workload {w.output_name}.{w.batch_num}iter.{w.job_id}')
print(f'{datetime.now()}: Queued workload '
f'{w.output_name}.{w.batch_num}iter.{w.job_id}',
file=f)
pending.append(w)
_, alive[:] = SalusServer.wait_workloads(alive, timeout=0, callback=workload_done)
while pending and accept_workload(pending[0], alive):
w = pending.pop(0)
logger.info(f'Started workload {w.output_name}.{w.batch_num}iter.{w.job_id}')
print(f'{datetime.now()}: Started workload '
f'{w.output_name}.{w.batch_num}iter.{w.job_id}',
file=f)
output_file = tmp / f'{w.output_name}.{w.batch_num}iter.{w.job_id}.output'
w.run(output_file)
started.append(w)
alive.append(w)
_, alive[:] = SalusServer.wait_workloads(alive, timeout=0, callback=workload_done)
if not workloads and not pending:
_, alive[:] = SalusServer.wait_workloads(alive, callback=workload_done)
return False
return True
def event_loop():
# check every 0.1 second
interval = 0.1
origin = default_timer()
while True:
st = default_timer()
should_continue = do_stuff(st - origin)
if not should_continue:
break
ed = default_timer()
                            elapsed = ed - st
                            time.sleep(interval - (elapsed % interval))
if FLAGS.use_salus:
scfg = scfg.copy(output_dir=logdir)
scfg.env['SALUS_DISABLE_LANEMGR'] = '1'
ss = SalusServer(scfg)
with ss.run():
event_loop()
else:
event_loop()
except Exception as ex:
logger.exception("Got exception when running workloads")
finally:
# if there's alive, we are doing cleanup
for w, _, _ in workloads:
if w.proc is not None and w.proc.poll() is None:
logger.warning(f'Killing workload that is not stopped yet: {w.canonical_name}')
kill_tree(w.proc, hard=True)
# check each workloads and fix workload output_file path
for w, _, _ in workloads:
if not FLAGS.ignore_error and w.proc is not None and w.proc.returncode != 0:
prompt.pause()
raise RuntimeError(f'Workload {w.canonical_name} did not finish cleanly: {w.proc.returncode}')
if w.output_file is not None:
w.output_file = logdir / w.output_file.name
| 39.184211
| 115
| 0.561898
|
89551b72d08b7daa3b86e6d52500c2b1c4b43371
| 32
|
py
|
Python
|
python/eda/eda/components/ST/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T20:01:19.000Z
|
2019-06-05T20:01:19.000Z
|
python/eda/eda/components/ST/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | null | null | null |
python/eda/eda/components/ST/__init__.py
|
32bitmicro/EDA
|
476a7f6dda23a494788bfdfaa27dff7082a80d6d
|
[
"BSD-3-Clause"
] | null | null | null |
from eda.components.ST import *
| 16
| 31
| 0.78125
|
385951b66a0a0fe4caa0afdbbe49563371afba3a
| 113
|
py
|
Python
|
test/schedule/apps.py
|
rishikumar/django_test_model
|
72c9fe41795da85d5e113f81e7705fc76439cd4e
|
[
"BSD-3-Clause"
] | null | null | null |
test/schedule/apps.py
|
rishikumar/django_test_model
|
72c9fe41795da85d5e113f81e7705fc76439cd4e
|
[
"BSD-3-Clause"
] | null | null | null |
test/schedule/apps.py
|
rishikumar/django_test_model
|
72c9fe41795da85d5e113f81e7705fc76439cd4e
|
[
"BSD-3-Clause"
] | null | null | null |
from django import apps
class AppConfig(apps.AppConfig):
name = 'test.schedule'
label = 'test.schedule'
| 18.833333
| 32
| 0.707965
|
77db8b89e6103769719b08fa74e6c96aa3d0df7a
| 1,921
|
py
|
Python
|
setup.py
|
Scout24/snakepit
|
afd7b1837fc9d2c083071413ce2be2eae7578f30
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Scout24/snakepit
|
afd7b1837fc9d2c083071413ce2be2eae7578f30
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Scout24/snakepit
|
afd7b1837fc9d2c083071413ce2be2eae7578f30
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script allows to support installation via:
# pip install git+git://github.com/pybuilder/pybuilder.git@<branch>
#
# This script is designed to be used in combination with `pip install` ONLY
#
# DO NOT RUN MANUALLY
#
import os
import subprocess
import sys
import glob
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
build_script = os.path.join(script_dir, "build.py")
exit_code = 0
try:
subprocess.check_call([sys.executable, build_script, "clean", "install_dependencies", "package", "-o"])
dist_dir = glob.glob(os.path.join(script_dir, "target", "dist", "*"))[0]
for src_file in glob.glob(os.path.join(dist_dir, "*")):
file_name = os.path.basename(src_file)
target_file_name = os.path.join(script_dir, file_name)
if os.path.exists(target_file_name):
if os.path.isdir(target_file_name):
os.removedirs(target_file_name)
else:
os.remove(target_file_name)
shutil.move(src_file, script_dir)
setup_args = sys.argv[1:]
subprocess.check_call([sys.executable, "setup.py"] + setup_args, cwd=script_dir)
except subprocess.CalledProcessError as e:
exit_code = e.returncode
sys.exit(exit_code)
| 34.303571
| 107
| 0.7038
|
1ea26451d3671f87df15bdff948b7f9c503a19fd
| 152,248
|
py
|
Python
|
src/sage/schemes/elliptic_curves/ell_curve_isogeny.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 3
|
2019-07-15T13:48:24.000Z
|
2019-11-08T12:31:43.000Z
|
src/sage/schemes/elliptic_curves/ell_curve_isogeny.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 2
|
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/schemes/elliptic_curves/ell_curve_isogeny.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:58.000Z
|
2020-07-23T10:29:58.000Z
|
# -*- coding: utf-8 -*-
r"""
Isogenies
An isogeny `\varphi: E_1\to E_2` between two elliptic curves `E_1` and
`E_2` is a morphism of curves that sends the origin of `E_1` to the
origin of `E_2`. Such a morphism is automatically a morphism of group
schemes and the kernel is a finite subgroup scheme of `E_1`. Such a
subscheme can either be given by a list of generators, which have to
be torsion points, or by a polynomial in the coordinate `x` of the
Weierstrass equation of `E_1`.
The usual way to create and work with isogenies is illustrated with
the following example::
sage: k = GF(11)
sage: E = EllipticCurve(k,[1,1])
sage: Q = E(6,5)
sage: phi = E.isogeny(Q)
sage: phi
Isogeny of degree 7 from Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 11 to Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 11
sage: P = E(4,5)
sage: phi(P)
(10 : 0 : 1)
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 11
sage: phi.rational_maps()
((x^7 + 4*x^6 - 3*x^5 - 2*x^4 - 3*x^3 + 3*x^2 + x - 2)/(x^6 + 4*x^5 - 4*x^4 - 5*x^3 + 5*x^2), (x^9*y - 5*x^8*y - x^7*y + x^5*y - x^4*y - 5*x^3*y - 5*x^2*y - 2*x*y - 5*y)/(x^9 - 5*x^8 + 4*x^6 - 3*x^4 + 2*x^3))
The functions directly accessible from an elliptic curve ``E`` over a
field are ``isogeny`` and ``isogeny_codomain``.
The most useful functions that apply to isogenies are
- ``codomain``
- ``degree``
- ``domain``
- ``dual``
- ``rational_maps``
- ``kernel_polynomial``
.. WARNING::
Only cyclic, separable isogenies are implemented (except for [2]). Some
algorithms may need the isogeny to be normalized.
AUTHORS:
- Daniel Shumow <shumow@gmail.com>: 2009-04-19: initial version
- Chris Wuthrich : 7/09: changes: add check of input, not the full list is needed.
10/09: eliminating some bugs.
- John Cremona 2014-08-08: tidying of code and docstrings, systematic
use of univariate vs. bivariate polynomials and rational functions.
"""
#*****************************************************************************
# Copyright (C) 2009 Daniel Shumow <shumow@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from six import itervalues
from six.moves import range
from copy import copy
from sage.categories import homset
from sage.categories.morphism import Morphism
from sage.rings.all import PolynomialRing, Integer, LaurentSeriesRing
from sage.rings.polynomial.polynomial_element import is_Polynomial
from sage.schemes.elliptic_curves.all import EllipticCurve
from sage.schemes.elliptic_curves.ell_generic import is_EllipticCurve
from sage.rings.number_field.number_field_base import is_NumberField
from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism, isomorphisms
from sage.sets.set import Set
from sage.structure.richcmp import richcmp_not_equal, richcmp
#
# Private function for parsing input to determine the type of
# algorithm
#
def isogeny_determine_algorithm(E, kernel):
r"""
Helper function that allows the various isogeny functions to infer
the algorithm type from the parameters passed in.
INPUT:
- ``E`` (elliptic curve) -- an elliptic curve
- ``kernel`` -- either a list of points on ``E``, or a univariate
polynomial or list of coefficients of a univariate polynomial.
OUTPUT:
(string) either 'velu' or 'kohel'
If ``kernel`` is a list of points on the EllipticCurve `E`, then
we will try to use Velu's algorithm.
If ``kernel`` is a list of coefficients or a univariate
polynomial, we will try to use the Kohel's algorithms.
EXAMPLES:
This helper function will be implicitly called by the following examples::
sage: R.<x> = GF(5)[]
sage: E = EllipticCurve(GF(5), [0,0,0,1,0])
We can construct the same isogeny from a kernel polynomial::
sage: phi = EllipticCurveIsogeny(E, x+3)
or from a list of coefficients of a kernel polynomial::
sage: phi == EllipticCurveIsogeny(E, [3,1])
True
or from a rational point which generates the kernel::
sage: phi == EllipticCurveIsogeny(E, E((2,0)) )
True
In the first two cases, Kohel's algorithm will be used, while in
the third case it is Velu::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import isogeny_determine_algorithm
sage: isogeny_determine_algorithm(E, x+3)
'kohel'
sage: isogeny_determine_algorithm(E, [3, 1])
'kohel'
sage: isogeny_determine_algorithm(E, E((2,0)))
'velu'
"""
kernel_is_list = isinstance(kernel, list)
if not kernel_is_list and kernel in E :
kernel = [kernel]
kernel_is_list = True
    if is_Polynomial(kernel) or (kernel_is_list and kernel[0] in E.base_ring()):
algorithm = "kohel"
elif (kernel_is_list) and (kernel[0] in E):
# note that if kernel[0] is on an extension of E this
# condition will be false
algorithm = "velu"
else:
raise ValueError("Invalid Parameters to EllipticCurveIsogeny constructor.")
return algorithm
def isogeny_codomain_from_kernel(E, kernel, degree=None):
r"""
Compute the isogeny codomain given a kernel.
INPUT:
- ``E`` - The domain elliptic curve.
- ``kernel`` - Either a list of points in the kernel of the isogeny, or a
kernel polynomial (specified as either a univariate
polynomial or a coefficient list).
- ``degree`` - an integer, (default:``None``) optionally specified degree
of the kernel.
OUTPUT:
(elliptic curve) the codomain of the separable normalized isogeny
from this kernel
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import isogeny_codomain_from_kernel
sage: E = EllipticCurve(GF(7), [1,0,1,0,1])
sage: R.<x> = GF(7)[]
sage: isogeny_codomain_from_kernel(E, [4,1], degree=3)
Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + 6 over Finite Field of size 7
sage: EllipticCurveIsogeny(E, [4,1]).codomain() == isogeny_codomain_from_kernel(E, [4,1], degree=3)
True
sage: isogeny_codomain_from_kernel(E, x^3 + x^2 + 4*x + 3)
Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + 6 over Finite Field of size 7
sage: isogeny_codomain_from_kernel(E, x^3 + 2*x^2 + 4*x + 3)
Elliptic Curve defined by y^2 + x*y + y = x^3 + 5*x + 2 over Finite Field of size 7
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: kernel_list = [E((15,10)), E((10,3)),E((6,5))]
sage: isogeny_codomain_from_kernel(E, kernel_list)
Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 3*x + 15 over Finite Field of size 19
"""
algorithm = isogeny_determine_algorithm(E, kernel)
if ("velu"==algorithm):
# if we are using Velu's formula, just instantiate the isogeny
# and return the codomain
return EllipticCurveIsogeny(E, kernel).codomain()
elif ("kohel"==algorithm):
return compute_codomain_kohel(E, kernel, degree)
def compute_codomain_formula(E, v, w):
r"""
Compute the codomain curve given parameters `v` and `w` (as in
Velu / Kohel / etc formulas).
INPUT:
- ``E`` -- an elliptic curve
- ``v``, ``w`` -- elements of the base field of ``E``
OUTPUT:
The elliptic curve with invariants
`[a_1,a_2,a_3,a_4-5v,a_6-(a_1^2+4a_2)v-7w]` where
`E=[a_1,a_2,a_3,a_4,a_6]`.
EXAMPLES:
This formula is used by every Isogeny instantiation::
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: phi = EllipticCurveIsogeny(E, E((1,2)) )
sage: phi.codomain()
Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 9*x + 13 over Finite Field of size 19
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_codomain_formula
sage: v = phi._EllipticCurveIsogeny__v
sage: w = phi._EllipticCurveIsogeny__w
sage: compute_codomain_formula(E, v, w) == phi.codomain()
True
"""
a1,a2,a3,a4,a6 = E.ainvs()
A4 = a4 - 5*v
A6 = a6 - (a1**2 + 4*a2)*v - 7*w
return EllipticCurve([a1, a2, a3, A4, A6])
def compute_vw_kohel_even_deg1(x0, y0, a1, a2, a4):
r"""
Compute Velu's (v,w) using Kohel's formulas for isogenies of
degree exactly divisible by 2.
INPUT:
- ``x0``, ``y0`` -- coordinates of a 2-torsion point on an elliptic curve E
- ``a1``, ``a2``, ``a4`` -- invariants of E
OUTPUT:
(tuple) Velu's isogeny parameters (v,w).
EXAMPLES:
This function will be implicitly called by the following example::
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: phi = EllipticCurveIsogeny(E, [9,1]); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 4*x + 5 over Finite Field of size 19 to Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 9*x + 8 over Finite Field of size 19
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_vw_kohel_even_deg1
sage: a1,a2,a3,a4,a6 = E.ainvs()
sage: x0 = -9
sage: y0 = -(a1*x0 + a3)/2
sage: compute_vw_kohel_even_deg1(x0, y0, a1, a2, a4)
(18, 9)
"""
v = (3*x0**2 + 2*a2*x0 + a4 - a1*y0)
w = x0*v
return (v,w)
def compute_vw_kohel_even_deg3(b2,b4,s1,s2,s3):
r"""
Compute Velu's (v,w) using Kohel's formulas for isogenies of
degree divisible by 4.
INPUT:
- ``b2``, ``b4`` -- invariants of an elliptic curve E
- ``s1``, ``s2``, ``s3`` -- signed coefficients of the 2-division
polynomial of E
OUTPUT:
(tuple) Velu's isogeny parameters (v,w).
EXAMPLES:
This function will be implicitly called by the following example::
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: R.<x> = GF(19)[]
sage: phi = EllipticCurveIsogeny(E, x^3 + 7*x^2 + 15*x + 12); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 4*x + 5 over Finite Field of size 19 to Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 3*x + 15 over Finite Field of size 19
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_vw_kohel_even_deg3
sage: (b2,b4) = (E.b2(), E.b4())
sage: (s1, s2, s3) = (-7, 15, -12)
sage: compute_vw_kohel_even_deg3(b2, b4, s1, s2, s3)
(4, 7)
"""
temp1 = (s1**2 - 2*s2)
v = 3*temp1 + b2*s1/2 + 3*b4/2
w = 3*(s1**3 - 3*s1*s2 + 3*s3) + b2*temp1/2 + b4*s1/2
return (v,w)
def compute_vw_kohel_odd(b2,b4,b6,s1,s2,s3,n):
r"""
Compute Velu's (v,w) using Kohel's formulas for isogenies of odd
degree.
INPUT:
- ``b2``, ``b4``, ``b6`` -- invariants of an elliptic curve E
- ``s1``, ``s2``, ``s3`` -- signed coefficients of the three highest
powers of `x` below the leading term of the kernel polynomial
(equivalently, the first three elementary symmetric functions of its roots).
- ``n`` (int) -- the degree
OUTPUT:
(tuple) Velu's isogeny parameters (v,w).
EXAMPLES:
This function will be implicitly called by the following example::
sage: E = EllipticCurve(GF(19), [18,17,16,15,14])
sage: R.<x> = GF(19)[]
sage: phi = EllipticCurveIsogeny(E, x^3 + 14*x^2 + 3*x + 11); phi
Isogeny of degree 7 from Elliptic Curve defined by y^2 + 18*x*y + 16*y = x^3 + 17*x^2 + 15*x + 14 over Finite Field of size 19 to Elliptic Curve defined by y^2 + 18*x*y + 16*y = x^3 + 17*x^2 + 18*x + 18 over Finite Field of size 19
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_vw_kohel_odd
sage: (b2,b4,b6) = (E.b2(), E.b4(), E.b6())
sage: (s1,s2,s3) = (-14,3,-11)
sage: compute_vw_kohel_odd(b2,b4,b6,s1,s2,s3,3)
(7, 1)
"""
v = 6*(s1**2 - 2*s2) + b2*s1 + n*b4
w = 10*(s1**3 - 3*s1*s2 + 3*s3) + 2*b2*(s1**2 - 2*s2) + 3*b4*s1 + n*b6
return (v,w)
def compute_codomain_kohel(E, kernel, degree):
r"""
Compute the codomain from the kernel polynomial using Kohel's
formulas.
INPUT:
- ``E`` -- an elliptic curve
- ``kernel`` (polynomial or list) -- the kernel polynomial, or a
list of its coefficients
- ``degree`` (int) -- degree of the isogeny
OUTPUT:
(elliptic curve) -- the codomain elliptic curve ``E``/``kernel``
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_codomain_kohel
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: phi = EllipticCurveIsogeny(E, [9,1])
sage: phi.codomain() == isogeny_codomain_from_kernel(E, [9,1])
True
sage: compute_codomain_kohel(E, [9,1], 2)
Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 9*x + 8 over Finite Field of size 19
sage: R.<x> = GF(19)[]
sage: E = EllipticCurve(GF(19), [18,17,16,15,14])
sage: phi = EllipticCurveIsogeny(E, x^3 + 14*x^2 + 3*x + 11)
sage: phi.codomain() == isogeny_codomain_from_kernel(E, x^3 + 14*x^2 + 3*x + 11)
True
sage: compute_codomain_kohel(E, x^3 + 14*x^2 + 3*x + 11, 7)
Elliptic Curve defined by y^2 + 18*x*y + 16*y = x^3 + 17*x^2 + 18*x + 18 over Finite Field of size 19
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: phi = EllipticCurveIsogeny(E, x^3 + 7*x^2 + 15*x + 12)
sage: isogeny_codomain_from_kernel(E, x^3 + 7*x^2 + 15*x + 12) == phi.codomain()
True
sage: compute_codomain_kohel(E, x^3 + 7*x^2 + 15*x + 12,4)
Elliptic Curve defined by y^2 + x*y + 3*y = x^3 + 2*x^2 + 3*x + 15 over Finite Field of size 19
.. NOTE::
This function uses the formulas of Section 2.4 of [K96]_.
REFERENCES:
.. [K96] Kohel, "Endomorphism Rings of Elliptic Curves over Finite
Fields", UC Berkeley PhD thesis 1996.
"""
# First set up the polynomial ring
base_field = E.base_ring()
poly_ring = PolynomialRing(base_field,'x')
if (is_Polynomial(kernel)):
psi = poly_ring(kernel)
kernel_list = psi.list()
elif isinstance(kernel, list) and (kernel[0] in base_field):
kernel_list = kernel
psi = poly_ring(kernel_list)
else:
raise ValueError("Invalid input to compute_codomain_kohel")
# next determine the even / odd part of the isogeny
psi_2tor = two_torsion_part(E, psi)
if (0 != psi_2tor.degree()): # even degree case
psi_quo = psi//psi_2tor
if (0 != psi_quo.degree()):
raise ArithmeticError("For basic Kohel's algorithm, if the kernel degree is even then the kernel must be contained in the two torsion.")
n = psi_2tor.degree()
if (1 == n): # degree divisible exactly by 2
a1,a2,a3,a4,a6 = E.ainvs()
x0 = -psi_2tor.constant_coefficient()
# determine y0
if (2 == base_field.characteristic()):
y0 = (x0**3 + a2*x0**2 + a4*x0 + a6).sqrt()
else:
y0 = -(a1*x0 + a3)/2
# now (x0,y0) is the 2-torsion point in the kernel
(v,w) = compute_vw_kohel_even_deg1(x0,y0,a1,a2,a4)
elif (3 == n): # psi_2tor is the full 2-division polynomial
b2 = E.b2()
b4 = E.b4()
s = psi_2tor.list()
s1 = -s[n-1]
s2 = s[n-2]
s3 = -s[n-3]
(v,w) = compute_vw_kohel_even_deg3(b2,b4,s1,s2,s3)
else: # odd degree case
n = psi.degree()
b2 = E.b2()
b4 = E.b4()
b6 = E.b6()
s1 = 0; s2 = 0; s3 = 0
if (1 <= n):
s1 = -kernel_list[n-1]
if (2 <= n):
s2 = kernel_list[n-2]
if (3 <= n):
s3 = -kernel_list[n-3]
# initializing these allows us to calculate E2.
(v,w) = compute_vw_kohel_odd(b2,b4,b6,s1,s2,s3,n)
return compute_codomain_formula(E, v, w)
def two_torsion_part(E, psi):
r"""
Return the greatest common divisor of ``psi`` and the 2-torsion
polynomial of `E`.
INPUT:
- ``E`` -- an elliptic curve
- ``psi`` -- a univariate polynomial over the base field of ``E``
OUTPUT:
(polynomial) the gcd of psi and the 2-torsion polynomial of ``E``.
EXAMPLES:
Every function that computes the kernel polynomial via Kohel's
formulas will call this function::
sage: E = EllipticCurve(GF(19), [1,2,3,4,5])
sage: R.<x> = GF(19)[]
sage: phi = EllipticCurveIsogeny(E, x + 13)
sage: isogeny_codomain_from_kernel(E, x + 13) == phi.codomain()
True
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import two_torsion_part
sage: two_torsion_part(E, x+13)
x + 13
"""
x = psi.parent().gen() # NB psi is univariate but could be constant
psi_2 = E.two_division_polynomial(x)
return psi.gcd(psi_2)
class EllipticCurveIsogeny(Morphism):
r"""
Class Implementing Isogenies of Elliptic Curves
This class implements cyclic, separable, normalized isogenies of
elliptic curves.
Several different algorithms for computing isogenies are
available. These include:
- Velu's Formulas: Velu's original formulas for computing
isogenies. This algorithm is selected by giving as the
``kernel`` parameter a list of points which generate a finite
subgroup.
- Kohel's Formulas: Kohel's original formulas for computing
isogenies. This algorithm is selected by giving as the
``kernel`` parameter a monic polynomial (or a coefficient list
(little endian)) which will define the kernel of the isogeny.
INPUT:
- ``E`` -- an elliptic curve, the domain of the isogeny to
initialize.
- ``kernel`` -- a kernel, either a point in ``E``, a list of
points in ``E``, a monic kernel polynomial, or ``None``. If
initializing from a domain/codomain, this must be set to None.
- ``codomain`` -- an elliptic curve (default:``None``). If
``kernel`` is ``None``, then this must be the codomain of a
cyclic, separable, normalized isogeny, furthermore, ``degree``
must be the degree of the isogeny from ``E`` to ``codomain``. If
``kernel`` is not ``None``, then this must be isomorphic to the
codomain of the cyclic normalized separable isogeny defined by
``kernel``, in this case, the isogeny is post composed with an
isomorphism so that this parameter is the codomain.
- ``degree`` -- an integer (default:``None``). If ``kernel`` is
``None``, then this is the degree of the isogeny from ``E`` to
``codomain``. If ``kernel`` is not ``None``, then this is used
to determine whether or not to skip a gcd of the kernel
polynomial with the two torsion polynomial of ``E``.
- ``model`` -- a string (default:``None``). The only supported
value is ``minimal``, in which case if ``E`` is a curve over
the rationals or over a number field, then the codomain is a
global minimal model where this exists.
- ``check`` (default: ``True``) checks if the input is valid to
define an isogeny
EXAMPLES:
A simple example of creating an isogeny of a field of small
characteristic::
sage: E = EllipticCurve(GF(7), [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)) ); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 3*x over Finite Field of size 7
sage: phi.degree() == 2
True
sage: phi.kernel_polynomial()
x
sage: phi.rational_maps()
((x^2 + 1)/x, (x^2*y - y)/x^2)
sage: phi == loads(dumps(phi)) # known bug
True
A more complicated example of a characteristic 2 field::
sage: E = EllipticCurve(GF(2^4,'alpha'), [0,0,1,0,1])
sage: P = E((1,1))
sage: phi_v = EllipticCurveIsogeny(E, P); phi_v
Isogeny of degree 3 from Elliptic Curve defined by y^2 + y = x^3 + 1 over Finite Field in alpha of size 2^4 to Elliptic Curve defined by y^2 + y = x^3 over Finite Field in alpha of size 2^4
sage: phi_ker_poly = phi_v.kernel_polynomial()
sage: phi_ker_poly
x + 1
sage: ker_poly_list = phi_ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list)
sage: phi_k == phi_v
True
sage: phi_k.rational_maps()
((x^3 + x + 1)/(x^2 + 1), (x^3*y + x^2*y + x*y + x + y)/(x^3 + x^2 + x + 1))
sage: phi_v.rational_maps()
((x^3 + x + 1)/(x^2 + 1), (x^3*y + x^2*y + x*y + x + y)/(x^3 + x^2 + x + 1))
sage: phi_k.degree() == phi_v.degree() == 3
True
sage: phi_k.is_separable()
True
sage: phi_v(E(0))
(0 : 1 : 0)
sage: alpha = E.base_field().gen()
sage: Q = E((0, alpha*(alpha + 1)))
sage: phi_v(Q)
(1 : alpha^2 + alpha : 1)
sage: phi_v(P) == phi_k(P)
True
sage: phi_k(P) == phi_v.codomain()(0)
True
We can create an isogeny that has kernel equal to the full 2
torsion::
sage: E = EllipticCurve(GF(3), [0,0,0,1,1])
sage: ker_list = E.division_polynomial(2).list()
sage: phi = EllipticCurveIsogeny(E, ker_list); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 3 to Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 3
sage: phi(E(0))
(0 : 1 : 0)
sage: phi(E((0,1)))
(1 : 0 : 1)
sage: phi(E((0,2)))
(1 : 0 : 1)
sage: phi(E((1,0)))
(0 : 1 : 0)
sage: phi.degree()
4
We can also create trivial isogenies with the trivial kernel::
sage: E = EllipticCurve(GF(17), [11, 11, 4, 12, 10])
sage: phi_v = EllipticCurveIsogeny(E, E(0))
sage: phi_v.degree()
1
sage: phi_v.rational_maps()
(x, y)
sage: E == phi_v.codomain()
True
sage: P = E.random_point()
sage: phi_v(P) == P
True
sage: E = EllipticCurve(GF(31), [23, 1, 22, 7, 18])
sage: phi_k = EllipticCurveIsogeny(E, [1]); phi_k
Isogeny of degree 1 from Elliptic Curve defined by y^2 + 23*x*y + 22*y = x^3 + x^2 + 7*x + 18 over Finite Field of size 31 to Elliptic Curve defined by y^2 + 23*x*y + 22*y = x^3 + x^2 + 7*x + 18 over Finite Field of size 31
sage: phi_k.degree()
1
sage: phi_k.rational_maps()
(x, y)
sage: phi_k.codomain() == E
True
sage: phi_k.kernel_polynomial()
1
sage: P = E.random_point(); P == phi_k(P)
True
Velu and Kohel also work in characteristic 0::
sage: E = EllipticCurve(QQ, [0,0,0,3,4])
sage: P_list = E.torsion_points()
sage: phi = EllipticCurveIsogeny(E, P_list); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 3*x + 4 over Rational Field to Elliptic Curve defined by y^2 = x^3 - 27*x + 46 over Rational Field
sage: P = E((0,2))
sage: phi(P)
(6 : -10 : 1)
sage: phi_ker_poly = phi.kernel_polynomial()
sage: phi_ker_poly
x + 1
sage: ker_poly_list = phi_ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list); phi_k
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 3*x + 4 over Rational Field to Elliptic Curve defined by y^2 = x^3 - 27*x + 46 over Rational Field
sage: phi_k(P) == phi(P)
True
sage: phi_k == phi
True
sage: phi_k.degree()
2
sage: phi_k.is_separable()
True
A more complicated example over the rationals (of odd degree)::
sage: E = EllipticCurve('11a1')
sage: P_list = E.torsion_points()
sage: phi_v = EllipticCurveIsogeny(E, P_list); phi_v
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: P = E((16,-61))
sage: phi_v(P)
(0 : 1 : 0)
sage: ker_poly = phi_v.kernel_polynomial(); ker_poly
x^2 - 21*x + 80
sage: ker_poly_list = ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(E, ker_poly_list); phi_k
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi_k == phi_v
True
sage: phi_v(P) == phi_k(P)
True
sage: phi_k.is_separable()
True
We can also do this same example over the number field defined by
the irreducible two torsion polynomial of `E`::
sage: E = EllipticCurve('11a1')
sage: P_list = E.torsion_points()
sage: K.<alpha> = NumberField(x^3 - 2* x^2 - 40*x - 158)
sage: EK = E.change_ring(K)
sage: P_list = [EK(P) for P in P_list]
sage: phi_v = EllipticCurveIsogeny(EK, P_list); phi_v
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158 to Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-7820)*x + (-263580) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158
sage: P = EK((alpha/2,-1/2))
sage: phi_v(P)
(122/121*alpha^2 + 1633/242*alpha - 3920/121 : -1/2 : 1)
sage: ker_poly = phi_v.kernel_polynomial()
sage: ker_poly
x^2 - 21*x + 80
sage: ker_poly_list = ker_poly.list()
sage: phi_k = EllipticCurveIsogeny(EK, ker_poly_list)
sage: phi_k
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158 to Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-7820)*x + (-263580) over Number Field in alpha with defining polynomial x^3 - 2*x^2 - 40*x - 158
sage: phi_v == phi_k
True
sage: phi_k(P) == phi_v(P)
True
sage: phi_k == phi_v
True
sage: phi_k.degree()
5
sage: phi_v.is_separable()
True
The following example shows how to specify an isogeny from domain
and codomain::
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = E.isogeny(f)
sage: E2 = phi.codomain()
sage: phi_s = EllipticCurveIsogeny(E, None, E2, 5)
sage: phi_s
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi_s == phi
True
sage: phi_s.rational_maps() == phi.rational_maps()
True
However only cyclic normalized isogenies can be constructed this
way. So it won't find the isogeny [3]::
sage: E.isogeny(None, codomain=E,degree=9)
Traceback (most recent call last):
...
ValueError: The two curves are not linked by a cyclic normalized isogeny of degree 9
Also the presumed isogeny between the domain and codomain must be
normalized::
sage: E2.isogeny(None,codomain=E,degree=5)
Traceback (most recent call last):
...
ValueError: The two curves are not linked by a cyclic normalized isogeny of degree 5
sage: phihat = phi.dual(); phihat
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
sage: phihat.is_normalized()
False
Here is an example of the construction of an endomorphism with cyclic
kernel on a CM curve::
sage: K.<i> = NumberField(x^2+1)
sage: E = EllipticCurve(K, [1,0])
sage: RK.<X> = K[]
sage: f = X^2 - 2/5*i + 1/5
sage: phi= E.isogeny(f)
sage: isom = phi.codomain().isomorphism_to(E)
sage: phi.set_post_isomorphism(isom)
sage: phi.codomain() == phi.domain()
True
sage: phi.rational_maps()
(((4/25*i + 3/25)*x^5 + (4/5*i - 2/5)*x^3 - x)/(x^4 + (-4/5*i + 2/5)*x^2 + (-4/25*i - 3/25)), ((11/125*i + 2/125)*x^6*y + (-23/125*i + 64/125)*x^4*y + (141/125*i + 162/125)*x^2*y + (3/25*i - 4/25)*y)/(x^6 + (-6/5*i + 3/5)*x^4 + (-12/25*i - 9/25)*x^2 + (2/125*i - 11/125)))
Domain and codomain tests (see :trac:`12880`)::
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E(0,0))
sage: phi.domain() == E
True
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 - 4*x over Rational Field
sage: E = EllipticCurve(GF(31), [1,0,0,1,2])
sage: phi = EllipticCurveIsogeny(E, [17, 1])
sage: phi.domain()
Elliptic Curve defined by y^2 + x*y = x^3 + x + 2 over Finite Field of size 31
sage: phi.codomain()
Elliptic Curve defined by y^2 + x*y = x^3 + 24*x + 6 over Finite Field of size 31
Composition tests (see :trac:`16245`)::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = E.isogeny([E(0), E((0,1)), E((0,-1))]); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7
sage: phi2 = phi * phi; phi2
Composite map:
From: Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7
To: Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7
Defn: Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7
then
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 7
Examples over relative number fields used not to work (see :trac:`16779`)::
sage: pol26 = hilbert_class_polynomial(-4*26)
sage: pol = NumberField(pol26,'a').optimized_representation()[0].polynomial()
sage: K.<a> = NumberField(pol)
sage: j = pol26.roots(K)[0][0]
sage: E = EllipticCurve(j=j)
sage: L.<b> = K.extension(x^2+26)
sage: EL = E.change_ring(L)
sage: iso2 = EL.isogenies_prime_degree(2); len(iso2)
1
sage: iso3 = EL.isogenies_prime_degree(3); len(iso3)
2
Examples over function fields used not to work (see :trac:`11327`)::
sage: F.<t> = FunctionField(QQ)
sage: E = EllipticCurve([0,0,0,-t^2,0])
sage: isogs = E.isogenies_prime_degree(2)
sage: isogs[0]
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + (-t^2)*x over Rational function field in t over Rational Field to Elliptic Curve defined by y^2 = x^3 + 4*t^2*x over Rational function field in t over Rational Field
sage: isogs[0].rational_maps()
((x^2 - t^2)/x, (x^2*y + t^2*y)/x^2)
sage: duals = [phi.dual() for phi in isogs]
sage: duals[0]
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 4*t^2*x over Rational function field in t over Rational Field to Elliptic Curve defined by y^2 = x^3 + (-t^2)*x over Rational function field in t over Rational Field
sage: duals[0].rational_maps()
((1/4*x^2 + t^2)/x, (1/8*x^2*y + (-1/2*t^2)*y)/x^2)
sage: duals[0]
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 4*t^2*x over Rational function field in t over Rational Field to Elliptic Curve defined by y^2 = x^3 + (-t^2)*x over Rational function field in t over Rational Field
"""
####################
# member variables
####################
__check = None
#
# variables common to all algorithms
#
__E1 = None # domain curve
__E2 = None # codomain curve
__degree = None
__separable = True # This class only implements separable isogenies (for now.)
__algorithm = None
__this_hash = None
#
# pre isomorphism
#
__intermediate_domain = None
__pre_isomorphism = None
__prei_x_coord_ratl_map = None
__prei_y_coord_ratl_map = None
#
# post isomorphism
#
__intermediate_codomain = None
__post_isomorphism = None
__posti_x_coord_ratl_map = None
__posti_y_coord_ratl_map = None
#
# algebraic structs
#
__base_field = None
__poly_ring = None # univariate in x over __base_field
__mpoly_ring = None # bivariate in x, y over __base_field
#
# Rational Maps
#
__rational_maps_initialized = False
__X_coord_rational_map = None
__Y_coord_rational_map = None
#
# The dual
#
__dual = None
#
# Kernel Data
#
__kernel_list = None # list of elements in the kernel
__kernel_polynomial_list = None # polynomial stored as a little endian list of coefficients
__kernel_polynomial = None # polynomial with roots at x values for x-coordinate of points in the kernel
__inner_kernel_polynomial = None # the inner kernel polynomial (ignoring preisomorphism)
__n = None
#
# member variables common to Velu's formula
#
# we keep track of the 2-torsion and non-2-torsion points separately
__kernel_2tor = None
__kernel_non2tor = None
# variables used in Velu's formula (as well as Kohel's variant)
__v = None
__w = None
#
# member variables specific to Kohel's algorithm.
#
__psi = None # psi polynomial
__phi = None # phi polynomial
__omega = None # omega polynomial
#
# Python Special Functions
#
def __init__(self, E, kernel, codomain=None, degree=None, model=None, check=True):
r"""
Constructor for EllipticCurveIsogeny class.
EXAMPLES::
sage: E = EllipticCurve(GF(2), [0,0,1,0,1])
sage: phi = EllipticCurveIsogeny(E, [1,1]); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 + y = x^3 + 1 over Finite Field of size 2 to Elliptic Curve defined by y^2 + y = x^3 over Finite Field of size 2
sage: E = EllipticCurve(GF(31), [0,0,0,1,0])
sage: P = E((2,17))
sage: phi = EllipticCurveIsogeny(E, P); phi
Isogeny of degree 8 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 31 to Elliptic Curve defined by y^2 = x^3 + 10*x + 28 over Finite Field of size 31
sage: E = EllipticCurve('17a1')
sage: phi = EllipticCurveIsogeny(E, [41/3, -55, -1, -1, 1]); phi
Isogeny of degree 9 from Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - x - 14 over Rational Field to Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 56*x - 10124 over Rational Field
sage: E = EllipticCurve('37a1')
sage: triv = EllipticCurveIsogeny(E, E(0)); triv
Isogeny of degree 1 from Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
sage: triv.rational_maps()
(x, y)
sage: E = EllipticCurve('49a3')
sage: R.<X> = QQ[]
sage: EllipticCurveIsogeny(E,X^3-13*X^2-58*X+503,check=False)
Isogeny of degree 7 from Elliptic Curve defined by y^2 + x*y = x^3 - x^2 - 107*x + 552 over Rational Field to Elliptic Curve defined by y^2 + x*y = x^3 - x^2 - 5252*x - 178837 over Rational Field
"""
if not is_EllipticCurve(E):
raise ValueError("E parameter must be an EllipticCurve.")
if not isinstance(kernel, list) and kernel in E :
# a single point was given, we put it in a list
# the first condition ensures that [1,1] is treated as x+1
kernel = [kernel]
# if the kernel is None and the codomain isn't
# calculate the kernel polynomial
pre_isom = None
post_isom = None
self.__check = check
if (kernel is None) and (codomain is not None):
if (degree is None):
raise ValueError("If specifying isogeny by domain and codomain, degree parameter must be set.")
# save the domain/codomain: really used now (trac #7096)
old_codomain = codomain
(pre_isom, post_isom, E, codomain, kernel) = compute_sequence_of_maps(E, codomain, degree)
self.__init_algebraic_structs(E)
algorithm = isogeny_determine_algorithm(E, kernel)
self.__algorithm = algorithm
if ("velu"==algorithm):
self.__init_from_kernel_list(kernel)
elif ("kohel"==algorithm):
self.__init_from_kernel_polynomial(kernel)
self.__compute_E2()
self.__setup_post_isomorphism(codomain, model)
if (pre_isom is not None):
self.set_pre_isomorphism(pre_isom)
if (post_isom is not None):
self.__set_post_isomorphism(old_codomain, post_isom) #(trac #7096)
# Inheritance house keeping
self.__perform_inheritance_housekeeping()
def _call_(self, P):
r"""
Function that implements the call-ability of elliptic curve
isogenies.
EXAMPLES::
sage: E = EllipticCurve(GF(17), [1, 9, 5, 4, 3])
sage: phi = EllipticCurveIsogeny(E, [6,13,1])
sage: phi(E((1,0)))
(15 : 13 : 1)
sage: E = EllipticCurve(GF(23), [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi(E((1,5)))
(2 : 0 : 1)
sage: E = EllipticCurve(QQ, [0,0,0,3,0])
sage: P = E((1,2))
sage: phi = EllipticCurveIsogeny(E, [0,1])
sage: phi(P)
(4 : -4 : 1)
sage: phi(-P)
(4 : 4 : 1)
sage: E = EllipticCurve(GF(17), [0,-1,0,-3,-1])
sage: Q = E((16,0))
sage: tau = E.isogeny([Q],E)
sage: tau(Q)
(0 : 1 : 0)
TESTS:
Tests for :trac:`10888`::
sage: K.<th> = NumberField(x^2+3)
sage: E = EllipticCurve(K,[7,0])
sage: phi = E.isogeny(E(0,0))
sage: P = E(-3,4*th)
sage: phi(P)
(-16/3 : 8/9*th : 1)
sage: Q = phi(P)
sage: phihat = phi.dual()
sage: phihat(Q)
(-1/48 : 127/576*th : 1)
Call a composed isogeny (added for :trac:`16238`)::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = E.isogeny([E(0), E((0,1)), E((0,-1))])
sage: phi(E.points()[0])
(0 : 1 : 0)
sage: phi2 = phi * phi
sage: phi2(E.points()[0])
(0 : 1 : 0)
Coercion works fine with :meth:`_call_` (added for :trac:`16238`)::
sage: K.<th> = NumberField(x^2+3)
sage: E = EllipticCurve(K,[7,0])
sage: E2=EllipticCurve(K,[5,0])
sage: phi=E.isogeny(E(0))
sage: phi(E2(0))
(0 : 1 : 0)
sage: E2(20,90)
(20 : 90 : 1)
sage: phi(E2(20,90))
Traceback (most recent call last):
...
TypeError: (20 : 90 : 1) fails to convert into the map's domain Elliptic Curve defined by y^2 = x^3 + 7*x over Number Field in th with defining polynomial x^2 + 3, but a `pushforward` method is not properly implemented
"""
if(P.is_zero()):
return self.__E2(0)
(xP, yP) = P.xy()
# if there is a pre isomorphism, apply it
if (self.__pre_isomorphism is not None):
temp_xP = self.__prei_x_coord_ratl_map(xP)
temp_yP = self.__prei_y_coord_ratl_map(xP, yP)
(xP, yP) = (temp_xP, temp_yP)
if ("velu" == self.__algorithm):
outP = self.__compute_via_velu_numeric(xP, yP)
elif ("kohel" == self.__algorithm):
outP = self.__compute_via_kohel_numeric(xP,yP)
# the intermediate functions return the point at infinity
# if the input point is in the kernel
if (outP == self.__intermediate_codomain(0)):
return self.__E2(0)
# if there is a post isomorphism, apply it
if (self.__post_isomorphism is not None):
tempX = self.__posti_x_coord_ratl_map(outP[0])
tempY = self.__posti_y_coord_ratl_map(outP[0], outP[1])
outP = (tempX, tempY)
return self.__E2(outP)
def __getitem__(self, i):
r"""
Return one of the rational map components.
.. NOTE::
Both components are returned as elements of the function
field `F(x,y)` in two variables over the base field `F`,
though the first only involves `x`. To obtain the
`x`-coordinate function as a rational function in `F(x)`,
use :meth:`x_rational_map`.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,2,0,1,-1])
sage: phi = EllipticCurveIsogeny(E, [1])
sage: phi[0]
x
sage: phi[1]
y
sage: E = EllipticCurve(GF(17), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi[0]
(x^2 + 3)/x
sage: phi[1]
(x^2*y - 3*y)/x^2
"""
return self.rational_maps()[i]
def __iter__(self):
r"""
Return an iterator through the rational map components.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,2,0,1,-1])
sage: phi = EllipticCurveIsogeny(E, [1])
sage: for c in phi: print(c)
x
y
sage: E = EllipticCurve(GF(17), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: for c in phi: print(c)
(x^2 + 3)/x
(x^2*y - 3*y)/x^2
"""
return iter(self.rational_maps())
def __hash__(self):
r"""
Function that implements hashing of isogeny objects.
This hashes the underlying kernel polynomial so that equal
isogeny objects have the same hash value. Also, this hashes
the base field, and domain and codomain curves as well, so
that isogenies with the same kernel polynomial (over different
base fields / curves) hash to different values.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi_v = EllipticCurveIsogeny(E, E((0,0)))
sage: phi_k = EllipticCurveIsogeny(E, [0,1])
sage: phi_k.__hash__() == phi_v.__hash__()
True
sage: E_F17 = EllipticCurve(GF(17), [0,0,0,1,1])
sage: phi_p = EllipticCurveIsogeny(E_F17, E_F17([0,1]))
sage: phi_p.__hash__() == phi_v.__hash__()
False
sage: E = EllipticCurve('49a3')
sage: R.<X> = QQ[]
sage: EllipticCurveIsogeny(E,X^3-13*X^2-58*X+503,check=False)
Isogeny of degree 7 from Elliptic Curve defined by y^2 + x*y = x^3 - x^2 - 107*x + 552 over Rational Field to Elliptic Curve defined by y^2 + x*y = x^3 - x^2 - 5252*x - 178837 over Rational Field
"""
if self.__this_hash is not None:
return self.__this_hash
ker_poly_list = self.__kernel_polynomial_list
if ker_poly_list is None:
ker_poly_list = self.__init_kernel_polynomial()
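# Combine the hashes of the kernel coefficients with those of the
# domain, codomain and base field, so that equal isogenies hash equally
# and isogenies over different curves or fields hash differently.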
this_hash = 0
for a in ker_poly_list:
this_hash ^= hash(a)
this_hash ^= hash(self.__E1)
this_hash ^= hash(self.__E2)
this_hash ^= hash(self.__base_field)
self.__this_hash = this_hash
return self.__this_hash
def _richcmp_(self, other, op):
r"""
Function that implements comparisons between isogeny objects.
This function works by comparing the underlying kernel
objects.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi_v = EllipticCurveIsogeny(E, E((0,0)))
sage: phi_k = EllipticCurveIsogeny(E, [0,1])
sage: phi_k == phi_v
True
sage: E_F17 = EllipticCurve(GF(17), [0,0,0,1,0])
sage: phi_p = EllipticCurveIsogeny(E_F17, [0,1])
sage: phi_p == phi_v
False
sage: E = EllipticCurve('11a1')
sage: phi = E.isogeny(E(5,5))
sage: phi == phi
True
sage: phi == -phi
False
sage: psi = E.isogeny(phi.kernel_polynomial())
sage: phi == psi
True
sage: phi.dual() == psi.dual()
True
"""
if (self.__kernel_polynomial is None):
self.__init_kernel_polynomial()
# We cannot just compare kernel polynomials, as was done until
# Trac #11327, as then phi and -phi compare equal, and
# similarly with phi and any composition of phi with an
# automorphism of its codomain, or any post-isomorphism.
# Comparing domains, codomains and rational maps seems much
# safer.
lx = self.domain()
rx = other.domain()
if lx != rx:
return richcmp_not_equal(lx, rx, op)
lx = self.codomain()
rx = other.codomain()
if lx != rx:
return richcmp_not_equal(lx, rx, op)
return richcmp(self.rational_maps(), other.rational_maps(), op)
def __neg__(self):
r"""
Function to implement unary negation (-) operator on
isogenies. Returns a copy of this isogeny that has been
negated.
EXAMPLES:
The following examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(17)(0))
sage: phi = EllipticCurveIsogeny(E, E((-1,0)) )
sage: negphi = -phi
sage: phi(E((0,1))) + negphi(E((0,1))) == 0
True
sage: E = EllipticCurve(j=GF(19)(1728))
sage: R.<x> = GF(19)[]
sage: phi = EllipticCurveIsogeny(E, x)
sage: negphi = -phi
sage: phi(E((3,7))) + negphi(E((3,12))) == phi(2*E((3,7)))
True
sage: negphi(E((18,6)))
(17 : 0 : 1)
sage: R.<x> = QQ[]
sage: E = EllipticCurve('17a1')
sage: R.<x> = QQ[]
sage: f = x - 11/4
sage: phi = EllipticCurveIsogeny(E, f)
sage: negphi = -phi
sage: phi.rational_maps()[0] == negphi.rational_maps()[0]
True
sage: P = E((7,13))
sage: phi(P) + negphi(P) == 0
True
"""
# save off the kernel lists
kernel_list = self.__kernel_list
self.__kernel_list = None
output = copy(self)
# reset the kernel lists
output.__kernel_list = copy(kernel_list)
self.__kernel_list = kernel_list
output.switch_sign()
return output
#
# Sage Special Functions
#
def _repr_(self):
r"""
Special Sage-specific function that implements the
functionality to display the isogeny ``self`` as a string.
EXAMPLES::
sage: E = EllipticCurve(GF(31), [1,0,1,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)) )
sage: phi._repr_()
'Isogeny of degree 17 from Elliptic Curve defined by y^2 + x*y + y = x^3 + x over Finite Field of size 31 to Elliptic Curve defined by y^2 + x*y + y = x^3 + 14*x + 9 over Finite Field of size 31'
sage: E = EllipticCurve(QQ, [1,0,0,1,9])
sage: phi = EllipticCurveIsogeny(E, [2,1])
sage: phi._repr_()
'Isogeny of degree 2 from Elliptic Curve defined by y^2 + x*y = x^3 + x + 9 over Rational Field to Elliptic Curve defined by y^2 + x*y = x^3 - 59*x + 165 over Rational Field'
"""
return 'Isogeny of degree %r from %r to %r' % (
self.__degree, self.__E1, self.__E2)
def _latex_(self):
r"""
Special Sage-specific function that implements the functionality
to display an isogeny object as a LaTeX string.
This function returns a latex string representing the isogeny
self as the `x` and `y` coordinate rational functions.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,0,0,1,-1])
sage: phi = EllipticCurveIsogeny(E, E(0))
sage: phi._latex_()
'\\left( x , y \\right)'
sage: E = EllipticCurve(GF(17), [0,0,0,1,-1])
sage: R.<X> = GF(17)[]
sage: phi = EllipticCurveIsogeny(E, X+11)
sage: phi._latex_()
'\\left( \\frac{x^{2} + 11 x + 7}{x + 11} , \\frac{x^{2} y + 5 x y + 12 y}{x^{2} + 5 x + 2} \\right)'
"""
ratl_maps = self.rational_maps()
return '\\left( %s , %s \\right)' % (ratl_maps[0]._latex_(), ratl_maps[1]._latex_())
###########################
# Private Common Functions
###########################
# delete the hash value
def __clear_cached_values(self):
r"""
A private function to clear the cached hash value, rational maps
and dual if the isogeny has been modified by a pre- or post-isomorphism.
EXAMPLES::
sage: F = GF(7)
sage: E = EllipticCurve(j=F(0))
sage: phi = EllipticCurveIsogeny(E, [E((0,-1)), E((0,1))])
sage: old_hash = hash(phi)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (-1,2,-3,4)))
sage: hash(phi) == old_hash
False
sage: R.<x> = QQ[]
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, x)
sage: old_ratl_maps = phi.rational_maps()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (-1,0,0,0)))
sage: old_ratl_maps == phi.rational_maps()
False
sage: old_ratl_maps[1] == -phi.rational_maps()[1]
True
sage: F = GF(127); R.<x> = F[]
sage: E = EllipticCurve(j=F(1728))
sage: f = x^5 + 43*x^4 + 97*x^3 + 81*x^2 + 42*x + 82
sage: phi = EllipticCurveIsogeny(E, f)
sage: old_hash = hash(phi)
sage: old_ratl_maps = phi.rational_maps()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (-13,13,-13,13)))
sage: old_hash == hash(phi)
False
sage: old_ratl_maps == phi.rational_maps()
False
sage: phi._EllipticCurveIsogeny__clear_cached_values()
"""
self.__this_hash = None
self.__rational_maps_initialized = False
self.__X_coord_rational_map = None
self.__Y_coord_rational_map = None
self.__dual = None
# performs the inheritance house keeping
def __perform_inheritance_housekeeping(self):
r"""
Internal helper function, sets values on the super classes of
this class.
EXAMPLES:
The following examples will implicitly exercise this
function::
sage: E = EllipticCurve(GF(43), [2,3,5,7,11])
sage: R.<x> = GF(43)[]; f = x + 42
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi._EllipticCurveIsogeny__perform_inheritance_housekeeping()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: E2 = phi.codomain()
sage: post_isom = WeierstrassIsomorphism(E2, (41, 37, 31, 29))
sage: phi.set_post_isomorphism(post_isom)
sage: E1pr = WeierstrassIsomorphism(E, (-1, 2, -3, 4)).codomain().codomain()
sage: pre_isom = E1pr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(pre_isom)
"""
# one of the superclasses uses these fields
self._domain = self.__E1
self._codomain = self.__E2
# sets up the parent
parent = homset.Hom(self.__E1, self.__E2)
Morphism.__init__(self, parent)
def __init_algebraic_structs(self, E):
r"""
An internal function for EllipticCurveIsogeny objects that
sets up the member variables necessary for algebra.
EXAMPLES::
sage: E = EllipticCurve(j=GF(17)(0))
sage: phi = EllipticCurveIsogeny(E, E((-1,0)))
The constructor calls this function itself, so the fields it
sets are already defined::
sage: phi._EllipticCurveIsogeny__E1
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__base_field
Finite Field of size 17
sage: phi._EllipticCurveIsogeny__poly_ring
Univariate Polynomial Ring in x over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__mpoly_ring
Multivariate Polynomial Ring in x, y over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__intermediate_domain
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 17
Now, calling the initialization function does nothing more::
sage: phi._EllipticCurveIsogeny__init_algebraic_structs(E)
sage: phi._EllipticCurveIsogeny__E1
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__base_field
Finite Field of size 17
sage: phi._EllipticCurveIsogeny__poly_ring
Univariate Polynomial Ring in x over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__mpoly_ring
Multivariate Polynomial Ring in x, y over Finite Field of size 17
sage: phi._EllipticCurveIsogeny__intermediate_domain
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 17
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi._EllipticCurveIsogeny__init_algebraic_structs(E)
sage: phi._EllipticCurveIsogeny__E1
Elliptic Curve defined by y^2 = x^3 + x over Rational Field
sage: phi._EllipticCurveIsogeny__base_field
Rational Field
sage: phi._EllipticCurveIsogeny__poly_ring
Univariate Polynomial Ring in x over Rational Field
sage: phi._EllipticCurveIsogeny__mpoly_ring
Multivariate Polynomial Ring in x, y over Rational Field
sage: phi._EllipticCurveIsogeny__intermediate_domain
Elliptic Curve defined by y^2 = x^3 + x over Rational Field
sage: F = GF(19); R.<x> = F[]
sage: E = EllipticCurve(j=GF(19)(0))
sage: phi = EllipticCurveIsogeny(E, x)
sage: phi._EllipticCurveIsogeny__init_algebraic_structs(E)
sage: phi._EllipticCurveIsogeny__E1
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 19
sage: phi._EllipticCurveIsogeny__base_field
Finite Field of size 19
sage: phi._EllipticCurveIsogeny__poly_ring
Univariate Polynomial Ring in x over Finite Field of size 19
sage: phi._EllipticCurveIsogeny__mpoly_ring
Multivariate Polynomial Ring in x, y over Finite Field of size 19
sage: phi._EllipticCurveIsogeny__intermediate_domain
Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 19
"""
self.__E1 = E
self.__base_field = E.base_ring()
self.__poly_ring = PolynomialRing(self.__base_field, ['x'])
self.__mpoly_ring = PolynomialRing(self.__base_field, ['x','y'])
from sage.rings.all import FractionField
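# Fraction fields used for the rational maps: the x-coordinate map
# lives in Frac(K[x]) and the y-coordinate map in Frac(K[x, y]).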
self.__xfield = FractionField(self.__poly_ring)
self.__xyfield = FractionField(self.__mpoly_ring)
self.__intermediate_domain = E
def __compute_E2(self):
r"""
Private function that computes and sets the isogeny codomain.
EXAMPLES:
These examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(7)(1728))
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 + 3*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__compute_E2()
sage: R.<x> = GF(7)[]
sage: phi = EllipticCurveIsogeny(E, x)
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 + 3*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__compute_E2()
"""
if ("velu" == self.__algorithm):
E2 = self.__compute_E2_via_velu()
elif ("kohel" == self.__algorithm):
E2 = self.__compute_E2_via_kohel()
self.__E2 = E2
self.__intermediate_codomain = E2
# initializes the rational maps fields
def __initialize_rational_maps(self, precomputed_maps=None):
r"""
Private function that computes and initializes the rational
maps.
INPUT:
- ``precomputed_maps`` (default None) -- tuple (X,Y) of
rational functions in x,y
EXAMPLES:
The following examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(7)(1728))
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi._EllipticCurveIsogeny__initialize_rational_maps()
sage: phi.rational_maps()
((x^2 + 1)/x, (x^2*y - y)/x^2)
sage: R.<x> = GF(7)[]
sage: phi = EllipticCurveIsogeny(E, x)
sage: phi.rational_maps()
((x^2 + 1)/x, (x^2*y - y)/x^2)
sage: phi._EllipticCurveIsogeny__initialize_rational_maps()
sage: E = EllipticCurve([1,2,3,4,5])
sage: Eshort = E.short_weierstrass_model()
sage: phi = E.isogeny(E(0), Eshort)
sage: phiX, phiY = phi.rational_maps()
sage: phiX(1,2), phiY(1,2)
(63, 864)
"""
if self.__rational_maps_initialized:
return
if precomputed_maps is None:
if ("velu"==self.__algorithm):
(X_map, Y_map) = self.__initialize_rational_maps_via_velu()
if ("kohel"==self.__algorithm):
(X_map, Y_map) = self.__initialize_rational_maps_via_kohel()
else:
X_map, Y_map = precomputed_maps
# cannot coerce directly in xfield for some reason
X_map = self.__poly_ring(X_map.numerator())/self.__poly_ring(X_map.denominator())
if self.__prei_x_coord_ratl_map is not None:
prei_X_map = self.__prei_x_coord_ratl_map
prei_Y_map = self.__prei_y_coord_ratl_map
X_map = X_map(prei_X_map)
Y_map = Y_map([prei_X_map, prei_Y_map])
if self.__posti_x_coord_ratl_map is not None:
# Do not reverse the order here!
Y_map = self.__posti_y_coord_ratl_map([X_map, Y_map])
X_map = self.__posti_x_coord_ratl_map(X_map)
self.__X_coord_rational_map = self.__xfield(X_map)
self.__Y_coord_rational_map = self.__xyfield(Y_map)
self.__rational_maps_initialized = True
def __init_kernel_polynomial(self):
r"""
Private function that initializes the kernel polynomial (if
the algorithm does not take it as a parameter).
EXAMPLES:
The following examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(7)(1728))
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.kernel_polynomial()
x
sage: phi._EllipticCurveIsogeny__init_kernel_polynomial()
[0, 1]
"""
if (self.__kernel_polynomial_list is not None):
return self.__kernel_polynomial_list
if ("velu" == self.__algorithm):
ker_poly_list = self.__init_kernel_polynomial_velu()
else:
raise RuntimeError("The kernel polynomial should already be defined!")
return ker_poly_list
def __set_pre_isomorphism(self, domain, isomorphism):
r"""
Private function to set the pre isomorphism and domain (and
keep track of the domain of the isogeny).
EXAMPLES::
sage: E = EllipticCurve(GF(43), [2,3,5,7,11])
sage: R.<x> = GF(43)[]; f = x + 42
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi._EllipticCurveIsogeny__perform_inheritance_housekeeping()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: E1pr = WeierstrassIsomorphism(E, (-1, 2, -3, 4)).codomain().codomain()
sage: pre_isom = E1pr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(pre_isom)
sage: phi._EllipticCurveIsogeny__set_pre_isomorphism(E, WeierstrassIsomorphism(E, (-1, 3, -3, 4)))
sage: E == phi.domain()
True
"""
self.__E1 = domain
# set the isomorphism
self.__pre_isomorphism = isomorphism
# calculate the isomorphism as a rational map.
u, r, s, t = [self.__base_field(c) for c in isomorphism.tuple()]
uinv = 1/u
uinv2 = uinv**2
uinv3 = uinv*uinv2
x = self.__poly_ring.gen()
y = self.__xyfield.gen(1) # not mpoly_ring.gen(1) else we end
# up in K(x)[y] and trouble ensues
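# The Weierstrass transformation (u, r, s, t) acts on coordinates by
# x |-> (x - r)/u^2 and y |-> (y - s*(x - r) - t)/u^3; build these
# maps below.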
self.__prei_x_coord_ratl_map = (x - r) * uinv2
self.__prei_y_coord_ratl_map = (y - s*(x-r) - t) * uinv3
if (self.__kernel_polynomial is not None):
ker_poly = self.__kernel_polynomial
ker_poly = ker_poly(self.__prei_x_coord_ratl_map)
self.__kernel_polynomial = ker_poly.monic()
self.__perform_inheritance_housekeeping()
def __set_post_isomorphism(self, codomain, isomorphism):
r"""
Private function to set the post isomorphism and codomain (and
keep track of the codomain of the isogeny).
EXAMPLES:
The following examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(7)(1728))
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: E2 = phi.codomain()
sage: isom = WeierstrassIsomorphism(E2, (-1,2,-3,4))
sage: phi.set_post_isomorphism(isom)
sage: phi._EllipticCurveIsogeny__set_post_isomorphism(E2, WeierstrassIsomorphism(phi.codomain(), (1,-2,3,-4)))
sage: E2 == phi.codomain()
True
"""
# set the codomains
self.__E2 = codomain
# set the isomorphism
self.__post_isomorphism = isomorphism
# calculate the isomorphism as a rational map.
u, r, s, t = [self.__base_field(c) for c in isomorphism.tuple()]
uinv = 1/u
uinv2 = uinv**2
uinv3 = uinv*uinv2
x = self.__poly_ring.gen()
y = self.__xyfield.gen(1)
self.__posti_x_coord_ratl_map = (x - r) * uinv2
self.__posti_y_coord_ratl_map = (y - s*(x-r) - t) * uinv3
self.__perform_inheritance_housekeeping()
def __setup_post_isomorphism(self, codomain, model):
r"""
Private function to set up the post isomorphism given the
codomain.
EXAMPLES:
The following examples inherently exercise this function::
sage: E = EllipticCurve(j=GF(7)(1728))
sage: E2 = EllipticCurve(GF(7), [0,0,0,5,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)), E2); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 5*x over Finite Field of size 7
sage: E3 = EllipticCurve(GF(7), [0,0,0,6,0])
sage: phi._EllipticCurveIsogeny__setup_post_isomorphism(E3, None)
sage: phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 6*x over Finite Field of size 7
sage: R.<x> = QQ[]
sage: E = EllipticCurve(j=1728)
sage: f = x^3 - x
sage: phi = EllipticCurveIsogeny(E, f, model='minimal'); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 - x over Rational Field to Elliptic Curve defined by y^2 = x^3 - x over Rational Field
sage: phi = EllipticCurveIsogeny(E, f, model=None)
sage: phi._EllipticCurveIsogeny__setup_post_isomorphism(None, 'minimal')
sage: phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 - x over Rational Field to Elliptic Curve defined by y^2 = x^3 - x over Rational Field
"""
# TODO: add checks to make sure that codomain and model
# parameters are consistent with the algorithm used.
post_isom = None
newE2 = None
oldE2 = self.__E2
if (model is not None):
if (codomain is not None):
raise ValueError("Cannot specify a codomain and model flag simultaneously.")
if ('minimal' == model):
if (not is_NumberField(oldE2.base_field())):
raise ValueError("specifying minimal for model flag only valid with curves over number fields.")
newE2 = oldE2.global_minimal_model(semi_global=True)
post_isom = oldE2.isomorphism_to(newE2)
else:
raise ValueError("Unknown value of model flag.")
elif (codomain is not None):
if (not is_EllipticCurve(codomain)):
raise ValueError("Codomain parameter must be an elliptic curve.")
if (not oldE2.is_isomorphic(codomain)):
raise ValueError("Codomain parameter must be isomorphic to computed codomain isogeny")
newE2 = codomain
post_isom = oldE2.isomorphism_to(newE2)
if (post_isom is not None):
self.__set_post_isomorphism(newE2, post_isom)
return
###########################
# Velu's Formula Functions
###########################
#
# Setup function for Velu's formula
#
def __init_from_kernel_list(self, kernel_gens):
r"""
Private function that initializes the isogeny from a list of
points which generate the kernel (for Velu's formulas).
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0))); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 6*x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 4*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__init_from_kernel_list([E(0), E((0,0))])
The following example demonstrates the necessity of avoiding any calls
to P.order(), since such calls involve factoring the group order which
could take a long time. ::
sage: p = 12 * next_prime(2^180) * next_prime(2^194) - 1
sage: F = FiniteField(p, proof=False)
sage: E = EllipticCurve([F(1), F(0)])
sage: P = E(0).division_points(3)[1]
sage: EllipticCurveIsogeny(E, P)
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 461742260113997803268895001173557974278278194575766957660028841364655249961609425998827452443620996655395008156411 to Elliptic Curve defined by y^2 = x^3 + 80816485163488178037199320944019099858815874115367810482828676054000067654558381377552245721755005198633191074893*x + 301497584865165444049833326660609767433467459033532853758006118022998267706948164646650354324860226263546558337993 over Finite Field of size 461742260113997803268895001173557974278278194575766957660028841364655249961609425998827452443620996655395008156411
"""
if self.__check :
for P in kernel_gens:
if not P.has_finite_order():
raise ValueError("The points in the kernel must be of finite order.")
# Compute a list of points in the subgroup generated by the
# points in kernel_gens. This is very naive: when finite
# subgroups are implemented better, this could be simplified,
# but it won't speed things up too much.
kernel_set = Set([self.__E1(0)])
from sage.misc.all import flatten
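# all_multiples(P, Q) returns the coset Q + <P>, i.e. the points
# Q, Q+P, Q+2P, ... until the running sum returns to Q.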
def all_multiples(itr, terminal):
mult_list = [terminal]
R = terminal + itr
while R != terminal:
mult_list.append(R)
R = R + itr
return mult_list
for P in kernel_gens:
kernel_set += Set(flatten([all_multiples(P,Q)
for Q in kernel_set]))
self.__kernel_list = kernel_set.list()
self.__kernel_2tor = {}
self.__kernel_non2tor = {}
self.__degree = Integer(len(kernel_set))
self.__sort_kernel_list()
#
# Precompute the values in Velu's Formula.
#
def __sort_kernel_list(self):
r"""
Private function that sorts the list of points in the kernel
(for Velu's formulas). It separates out the 2-torsion points and
puts them in a dictionary.
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + 6*x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__kernel_2tor = {}
sage: phi._EllipticCurveIsogeny__kernel_non2tor = {}
sage: phi._EllipticCurveIsogeny__sort_kernel_list()
"""
a1,a2,a3,a4,a6 = self.__E1.ainvs()
v = 0
w = 0
for Q in self.__kernel_list:
if Q.is_zero():
continue
(xQ,yQ) = Q.xy()
gxQ = 3*xQ**2 + 2*a2*xQ + a4 - a1*yQ
gyQ = -2*yQ - a1*xQ - a3
uQ = gyQ**2
# sort torsion points:
if (2*yQ == -a1*xQ - a3): # Q is 2-torsion
vQ = gxQ
self.__kernel_2tor[xQ] = (xQ,yQ,gxQ,gyQ,vQ,uQ)
v = v + vQ
w = w + (uQ + xQ*vQ)
elif xQ not in self.__kernel_non2tor: # Q is not a 2-torsion
vQ = 2*gxQ - a1*gyQ
self.__kernel_non2tor[xQ] = (xQ,yQ,gxQ,gyQ,vQ,uQ)
v = v + vQ
w = w + (uQ + xQ*vQ)
self.__v = v
self.__w = w
#
# Velu's formula computing the codomain curve
#
def __compute_E2_via_velu(self):
r"""
Private function that computes the codomain via Velu's
formulas.
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__compute_E2_via_velu()
Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 7
"""
v = self.__v
w = self.__w
return compute_codomain_formula(self.__E1, v,w)
def __velu_sum_helper(self, Qvalues, a1, a3, x, y):
r"""
Private function for Velu's formulas, a helper function used to
compute the contribution of one kernel point to the summation.
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: Q = E((0,0)); phi(Q)
(0 : 0 : 1)
sage: phi.rational_maps()
((x^4 - 2*x^3 + x^2 - 3*x)/(x^3 - 2*x^2 + 3*x - 2), (x^5*y - 2*x^3*y - x^2*y - 2*x*y + 2*y)/(x^5 + 3*x^3 + 3*x^2 + x - 1))
sage: F = GF(7)
sage: E = EllipticCurve(F, [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)) )
sage: Qvals = phi._EllipticCurveIsogeny__kernel_2tor[0]
sage: phi._EllipticCurveIsogeny__velu_sum_helper(Qvals, 0, 0, F(5), F(5))
(3, 3)
sage: R.<x,y> = GF(7)[]
sage: phi._EllipticCurveIsogeny__velu_sum_helper(Qvals, 0, 0, x, y)
(1/x, y/x^2)
"""
xQ = Qvalues[0]
yQ = Qvalues[1]
gxQ = Qvalues[2]
gyQ = Qvalues[3]
vQ = Qvalues[4]
uQ = Qvalues[5]
t1 = x - xQ
inv_t1 = t1**-1
inv_t1_2 = inv_t1**2
inv_t1_3 = inv_t1_2*inv_t1
tX = vQ*inv_t1 + uQ*(inv_t1_2)
tY0 = uQ*(2*y + a1*x + a3)
tY1 = vQ*(a1*t1 + y - yQ)
tY2 = a1*uQ - gxQ*gyQ
# Without this explicit coercion, tY ends up in K(x)[y]
# instead of K(x,y), and trouble ensues!
from sage.rings.all import FractionField
F = FractionField(y.parent())
tY = ( tY0*F(inv_t1_3) + (tY1 + tY2)*F(inv_t1_2) )
return (tX, tY)
def __compute_via_velu_numeric(self, xP, yP):
r"""
Private function that computes the image of the point with affine
coordinates ``(xP, yP)`` under Velu's formulas, returning the point
at infinity of the intermediate codomain if the input lies in the
kernel.
EXAMPLES:
The following example inherently exercises this function::
sage: F = GF(7)
sage: E = EllipticCurve(F, [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: Q = E((0,0)); phi(Q)
(0 : 0 : 1)
sage: Q = E((-1,0)); phi(Q)
(0 : 0 : 1)
sage: phi._EllipticCurveIsogeny__compute_via_velu_numeric(F(0), F(0))
(0, 0)
sage: phi._EllipticCurveIsogeny__compute_via_velu_numeric(F(-1), F(0))
(0, 0)
"""
# first check if the point is in the kernel
if xP in self.__kernel_2tor or xP in self.__kernel_non2tor:
return self.__intermediate_codomain(0)
outP = self.__compute_via_velu(xP,yP)
return outP
def __compute_via_velu(self, xP, yP):
r"""
Private function for Velu's formulas, to perform the summation.
EXAMPLES:
The following example inherently exercises this function::
sage: F = GF(7)
sage: E = EllipticCurve(F, [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: Q = E((0,0)); phi(Q)
(0 : 0 : 1)
sage: phi.rational_maps()
((x^4 - 2*x^3 + x^2 - 3*x)/(x^3 - 2*x^2 + 3*x - 2), (x^5*y - 2*x^3*y - x^2*y - 2*x*y + 2*y)/(x^5 + 3*x^3 + 3*x^2 + x - 1))
sage: phi._EllipticCurveIsogeny__compute_via_velu(F(0), F(0))
(0, 0)
sage: R.<x,y> = GF(7)[]
sage: phi._EllipticCurveIsogeny__compute_via_velu(x, y)
((x^4 - 2*x^3 + x^2 - 3*x)/(x^3 - 2*x^2 + 3*x - 2),
(x^5*y - 2*x^3*y - x^2*y - 2*x*y + 2*y)/(x^5 + 3*x^3 + 3*x^2 + x - 1))
"""
ker_2tor = self.__kernel_2tor
ker_non2tor = self.__kernel_non2tor
X = 0
Y = 0
a1 = self.__E1.a1()
a3 = self.__E1.a3()
        # next, iterate over the 2-torsion points of the kernel
for Qvalues in itervalues(ker_2tor):
(tX, tY) = self.__velu_sum_helper(Qvalues, a1, a3, xP, yP)
X = X + tX
Y = Y + tY
for Qvalues in itervalues(ker_non2tor):
(tX, tY) = self.__velu_sum_helper(Qvalues, a1, a3, xP, yP)
X = X + tX
Y = Y + tY
X = xP + X
Y = yP - Y
return (X,Y)
def __initialize_rational_maps_via_velu(self):
r"""
Private function for Velu's formulas, helper function to
initialize the rational maps.
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: phi.rational_maps()
((x^4 - 2*x^3 + x^2 - 3*x)/(x^3 - 2*x^2 + 3*x - 2), (x^5*y - 2*x^3*y - x^2*y - 2*x*y + 2*y)/(x^5 + 3*x^3 + 3*x^2 + x - 1))
sage: phi._EllipticCurveIsogeny__initialize_rational_maps_via_velu()
((x^4 + 5*x^3 + x^2 + 4*x)/(x^3 + 5*x^2 + 3*x + 5), (x^5*y - 2*x^3*y - x^2*y - 2*x*y + 2*y)/(x^5 + 3*x^3 + 3*x^2 + x - 1))
"""
x = self.__poly_ring.gen()
y = self.__mpoly_ring.gen(1)
return self.__compute_via_velu(x,y)
def __init_kernel_polynomial_velu(self):
r"""
Private function for Velu's formulas, helper function to
        initialize the kernel polynomial.
EXAMPLES:
The following example inherently exercises this function::
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: P = E((4,2))
sage: phi = EllipticCurveIsogeny(E, P)
sage: phi.kernel_polynomial()
x^2 + 2*x + 4
sage: phi._EllipticCurveIsogeny__init_kernel_polynomial_velu()
[4, 2, 1]
"""
poly_ring = self.__poly_ring
x = poly_ring.gen()
invX = 0
if (self.__pre_isomorphism is not None):
pre_isom = self.__pre_isomorphism
u = pre_isom.u
r = pre_isom.r
invX = (u**2)*x + r
else:
invX = x
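        # invX sends x-coordinates on the intermediate model back to the
        # original domain curve, so the kernel polynomial constructed
        # below is expressed in the coordinates of the user's curve.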
psi = poly_ring(1)
for Qvalues in itervalues(self.__kernel_2tor):
xQ = invX(x=Qvalues[0])
psi = psi*(x - xQ)
for Qvalues in itervalues(self.__kernel_non2tor):
xQ = invX(x=Qvalues[0])
psi = psi*(x - xQ)
ker_poly_list = psi.list()
self.__kernel_polynomial_list = ker_poly_list
self.__kernel_polynomial = psi
return ker_poly_list
###################################
# Kohel's Variant of Velu's Formula
###################################
def __init_from_kernel_polynomial(self, kernel_polynomial):
r"""
Private function that initializes the isogeny from a kernel
polynomial.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,0,0,-1,0])
sage: phi = EllipticCurveIsogeny(E, x);phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 6*x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 4*x over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__init_from_kernel_polynomial(x)
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 4*x + 2 over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__init_from_kernel_polynomial(x+6)
"""
poly_ring = self.__poly_ring
E = self.__E1
# Convert to a univariate polynomial, even if it had a
# bivariate parent, or was given as a list:
self.__kernel_polynomial = psi = poly_ring(kernel_polynomial)
if psi.leading_coefficient() != 1:
raise ValueError("The kernel polynomial must be monic.")
self.__kernel_polynomial_list = psi.list()
#
        # Determine whether the kernel polynomial is entirely two-torsion
#
psi_G = two_torsion_part(E, psi).monic()
if (0 != psi_G.degree()): # even degree case
psi_quo = psi//psi_G
if (0 != psi_quo.degree()):
raise NotImplementedError("For basic Kohel's algorithm, if the kernel degree is even then the kernel must be contained in the two torsion.")
(phi, omega, v, w, n, d) = self.__init_even_kernel_polynomial(E, psi_G)
else: # odd degree case
(phi, omega, v, w, n, d) = self.__init_odd_kernel_polynomial(E, psi)
#
# Set up the necessary instance variables
#
self.__kernel_polynomial = psi
self.__inner_kernel_polynomial = psi
self.__degree = Integer(d) # degree of the isogeny
# As a rational map, the isogeny maps (x,y) to (X,Y), where
# X=phi(x)/psi(x)^2 and Y=omega(x,y)/psi(x)^3. Both phi and
# psi are univariate polynomials in x, while omega is a
# bivariate polynomial in x, y. The names are compatible so
# that univariate polynomials automatically coerce into the
# bivariate polynomial ring.
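        # Illustration (not computed here): for the 2-isogeny of
        # y^2 = x^3 + a4*x with kernel polynomial psi = x, one finds
        # phi = x^3 + a4*x and omega = y*(x^3 - a4*x), giving
        # X = (x^2 + a4)/x and Y = y*(x^2 - a4)/x^2.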
self.__psi = psi
self.__phi = phi
self.__omega = omega
self.__v = v
self.__w = w
def __init_even_kernel_polynomial(self, E, psi_G):
r"""
Returns the isogeny parameters for the 2-part of an isogeny.
INPUT:
- ``E`` -- an elliptic curve
- ``psi_G`` -- a univariate polynomial over the base field of
``E`` of degree 1 or 3 dividing its 2-division polynomial
OUTPUT:
(phi, omega, v, w, n, d) where:
- ``phi`` is a univariate polynomial, the numerator of the
`X`-coordinate of the isogeny;
- ``omega`` is a bivariate polynomial, the numerator of the
`Y`-coordinate of the isogeny;
- ``v``, ``w`` are the Velu parameters of the isogeny;
- ``n`` is the degree of ``psi``;
- ``d`` is the degree of the isogeny.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [-1,0])
sage: phi = EllipticCurveIsogeny(E, x); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 6*x over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 4*x over Finite Field of size 7
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import two_torsion_part
sage: psig = two_torsion_part(E,x)
sage: phi._EllipticCurveIsogeny__init_even_kernel_polynomial(E,psig)
(x^3 + 6*x, x^3*y + x*y, 6, 0, 1, 2)
sage: F = GF(2^4, 'alpha'); R.<x> = F[]
sage: E = EllipticCurve(F, [1,1,0,1,0])
sage: phi = EllipticCurveIsogeny(E, x); phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 + x*y = x^3 + x^2 + x over Finite Field in alpha of size 2^4 to Elliptic Curve defined by y^2 + x*y = x^3 + x^2 + 1 over Finite Field in alpha of size 2^4
sage: psig = two_torsion_part(E,x)
sage: phi._EllipticCurveIsogeny__init_even_kernel_polynomial(E,psig)
(x^3 + x, x^3*y + x^2 + x*y, 1, 0, 1, 2)
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: R.<x> = GF(7)[]
sage: f = x^3 + 6*x^2 + 1
sage: phi = EllipticCurveIsogeny(E, f); phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 2*x + 5 over Finite Field of size 7
sage: psig = two_torsion_part(E,f)
sage: phi._EllipticCurveIsogeny__init_even_kernel_polynomial(E,psig)
(x^7 + 5*x^6 + 2*x^5 + 6*x^4 + 3*x^3 + 5*x^2 + 6*x + 3, x^9*y - 3*x^8*y + 2*x^7*y - 3*x^3*y + 2*x^2*y + x*y - y, 1, 6, 3, 4)
"""
        # check if the polynomial really divides the two-torsion polynomial
if self.__check and E.division_polynomial(2, x=self.__poly_ring.gen()) % psi_G != 0 :
raise ValueError("The polynomial does not define a finite subgroup of the elliptic curve.")
n = psi_G.degree() # 1 or 3
d = n+1 # 2 or 4
base_field = self.__base_field
char = base_field.characteristic()
a1,a2,a3,a4,a6 = E.ainvs()
b2,b4,_,_ = E.b_invariants()
x = self.__poly_ring.gen()
y = self.__mpoly_ring.gen(1)
if (1 == n):
x0 = -psi_G.constant_coefficient()
# determine y0
if (2 == char):
y0 = (x0**3 + a2*x0**2 + a4*x0 + a6).sqrt()
else:
y0 = -(a1*x0 + a3)/2
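            # (x0, y0) is the rational 2-torsion point being killed: x0 is
            # the root of psi_G, and y0 satisfies 2*y0 + a1*x0 + a3 = 0 (or
            # is the square root of x0^3 + a2*x0^2 + a4*x0 + a6 in
            # characteristic 2).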
(v,w) = compute_vw_kohel_even_deg1(x0,y0,a1,a2,a4)
phi = (x*psi_G + v)*psi_G
omega = (y*psi_G**2 - v*(a1*psi_G + (y - y0)))*psi_G
elif (3 == n):
s = psi_G.list()
s1 = -s[n-1]
s2 = s[n-2]
s3 = -s[n-3]
psi_G_pr = psi_G.derivative()
psi_G_prpr = psi_G_pr.derivative()
phi = (psi_G_pr**2) + (-2*psi_G_prpr + (4*x - s1))*psi_G
phi_pr = phi.derivative(x)
psi_2 = 2*y + a1*x + a3
omega = (psi_2*(phi_pr*psi_G - phi*psi_G_pr) - (a1*phi + a3*psi_G)*psi_G)/2
phi = phi*psi_G
omega = omega*psi_G
(v,w) = compute_vw_kohel_even_deg3(b2,b4,s1,s2,s3)
else:
raise ValueError("input polynomial must be of degree 1 or 3, not %d" % n)
return (phi, omega, v, w, n, d)
def __init_odd_kernel_polynomial(self, E, psi):
r"""
Returns the isogeny parameters for a cyclic isogeny of odd degree.
INPUT:
- ``E`` -- an elliptic curve
- ``psi`` -- a univariate polynomial over the base field of
``E``, assumed to be a kernel polynomial
OUTPUT:
(phi, omega, v, w, n, d) where:
- ``phi`` is a univariate polynomial, the numerator of the
`X`-coordinate of the isogeny;
- ``omega`` is a bivariate polynomial, the numerator of the
`Y`-coordinate of the isogeny;
- ``v``, ``w`` are the Velu parameters of the isogeny;
- ``n`` is the degree of ``psi``;
- ``d`` is the degree of the isogeny.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 4*x + 2 over Finite Field of size 7
sage: R.<x> = GF(7)[]
sage: phi._EllipticCurveIsogeny__init_odd_kernel_polynomial(E, x+6)
(x^3 + 5*x^2 + 3*x + 2, x^3*y - 3*x^2*y + x*y, 2, 6, 1, 3)
sage: F = GF(2^4, 'alpha'); R.<x> = F[]
sage: alpha = F.gen()
sage: E = EllipticCurve(F, [1,1,F.gen(),F.gen()^2+1,1])
sage: f = x + alpha^2 + 1
sage: phi = EllipticCurveIsogeny(E, f); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 + x*y + alpha*y = x^3 + x^2 + (alpha^2+1)*x + 1 over Finite Field in alpha of size 2^4 to Elliptic Curve defined by y^2 + x*y + alpha*y = x^3 + x^2 + alpha*x + alpha^3 over Finite Field in alpha of size 2^4
sage: R.<x> = F[]
sage: f = x + alpha^2 + 1
sage: phi._EllipticCurveIsogeny__init_odd_kernel_polynomial(E, f)
(x^3 + (alpha^2 + 1)*x + alpha^3 + alpha^2 + alpha, x^3*y + (alpha^2 + 1)*x^2*y + (alpha^2 + alpha + 1)*x^2 + (alpha^2 + 1)*x*y + (alpha^2 + alpha)*x + (alpha)*y + (alpha), alpha^2 + alpha + 1, alpha^3 + alpha^2 + alpha, 1, 3)
sage: E = EllipticCurve(j=-262537412640768000)
sage: f = (E.isogenies_prime_degree()[0]).kernel_polynomial()
sage: f.degree()
81
sage: E.isogeny(kernel=f) # long time (3.6s, 2014)
Isogeny of degree 163 from Elliptic Curve defined by y^2 + y = x^3 - 2174420*x + 1234136692 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - 57772164980*x - 5344733777551611 over Rational Field
"""
n = psi.degree()
d = 2*n + 1
        # check if the polynomial really divides the torsion polynomial:
if self.__check:
alpha = psi.parent().quotient(psi).gen()
if not E.division_polynomial(d, x=alpha).is_zero():
raise ValueError("The polynomial does not define a finite subgroup of the elliptic curve.")
b2, b4, b6, _ = E.b_invariants()
psi_coeffs = psi.list()
s1 = 0; s2 = 0; s3 = 0
if (1 <= n):
s1 = -psi_coeffs[n-1]
if (2 <= n):
s2 = psi_coeffs[n-2]
if (3 <= n):
s3 = -psi_coeffs[n-3]
# initializing these allows us to calculate E2.
(v,w) = compute_vw_kohel_odd(b2,b4,b6,s1,s2,s3,n)
# initialize the polynomial temporary variables
psi_pr = psi.derivative()
psi_prpr = psi_pr.derivative()
x = self.__poly_ring.gen()
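        # Kohel's formula for the numerator of the x-coordinate map:
        # phi = (4*x^3 + b2*x^2 + 2*b4*x + b6)*(psi'^2 - psi''*psi)
        #       - (6*x^2 + b2*x + b4)*psi'*psi + (d*x - 2*s1)*psi^2,
        # where s1 is the sum of the roots of psi (the x-coordinates of
        # the nonzero kernel points, each counted once).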
phi = (4*x**3 + b2*x**2 + 2*b4*x + b6)*(psi_pr**2 - psi_prpr*psi) - \
(6*x**2 + b2*x + b4)*psi_pr*psi + (d*x - 2*s1)*psi**2
phi_pr = phi.derivative(x)
if (2 != self.__base_field.characteristic()):
omega = self.__compute_omega_fast(E, psi, psi_pr, phi, phi_pr)
else:
omega = self.__compute_omega_general(E, psi, psi_pr, phi, phi_pr)
return (phi, omega, v, w, n, d)
#
# This is the fast omega computation that works when characteristic is not 2
#
def __compute_omega_fast(self, E, psi, psi_pr, phi, phi_pr):
r"""
Return omega from phi, psi and their derivatives, used when
        the characteristic of the base field is not 2.
INPUT:
- ``E`` -- an elliptic curve.
- ``psi, psi_pr, phi, phi_pr`` -- univariate polynomials over
the base field of ``E``, where ``psi`` is the kernel
polynomial and ``phi`` the numerator of the `X`-coordinate
of the isogeny, together with their derivatives.
OUTPUT:
- ``omega`` -- a bivariate polynomial giving the numerator of
the `Y`-coordinate of the isogeny.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 1 over Finite Field of size 7 to Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 4*x + 2 over Finite Field of size 7
sage: R.<x,y> = GF(7)[]
sage: psi = phi._EllipticCurveIsogeny__psi
sage: psi_pr = psi.derivative()
sage: fi = phi._EllipticCurveIsogeny__phi
sage: fi_pr = fi.derivative()
sage: phi._EllipticCurveIsogeny__compute_omega_fast(E, psi, psi_pr, fi, fi_pr)
x^3*y - 3*x^2*y + x*y
"""
a1 = E.a1()
a3 = E.a3()
x, y = self.__mpoly_ring.gens()
psi_2 = 2*y + a1*x + a3
        # Note: the formula below is correct; the formula in Kohel's
        # thesis has some typos, notably the first plus sign should be
        # a minus, as it is here.
return phi_pr*psi*psi_2/2 - phi*psi_pr*psi_2 - (a1*phi + a3*psi**2)*psi/2
def __compute_omega_general(self, E, psi, psi_pr, phi, phi_pr):
r"""
Return omega from phi, psi and their derivatives, in any
characteristic.
INPUT:
- ``E`` -- an elliptic curve.
- ``psi, psi_pr, phi, phi_pr`` -- univariate polynomials over
the base field of ``E``, where ``psi`` is the kernel
polynomial and ``phi`` the numerator of the `X`-coordinate
of the isogeny, together with their derivatives.
OUTPUT:
- ``omega`` -- a bivariate polynomial giving the numerator of
the `Y`-coordinate of the isogeny.
EXAMPLES:
These examples inherently exercise this private function::
sage: F = GF(2^4, 'alpha'); R.<x> = F[]
sage: alpha = F.gen()
sage: E = EllipticCurve(F, [1,1,F.gen(),F.gen()^2+1,1])
sage: f = x + alpha^2 + 1
sage: phi = EllipticCurveIsogeny(E, f); phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 + x*y + alpha*y = x^3 + x^2 + (alpha^2+1)*x + 1 over Finite Field in alpha of size 2^4 to Elliptic Curve defined by y^2 + x*y + alpha*y = x^3 + x^2 + alpha*x + alpha^3 over Finite Field in alpha of size 2^4
sage: R.<x,y> = F[]
sage: psi = phi._EllipticCurveIsogeny__psi
sage: psi_pr = psi.derivative()
sage: fi = phi._EllipticCurveIsogeny__phi
sage: fi_pr = fi.derivative()
sage: phi._EllipticCurveIsogeny__compute_omega_general(E, psi, psi_pr, fi, fi_pr)
x^3*y + (alpha^2 + 1)*x^2*y + (alpha^2 + alpha + 1)*x^2 + (alpha^2 + 1)*x*y + (alpha^2 + alpha)*x + (alpha)*y + (alpha)
A bug fixed in :trac:`7907`::
sage: F = GF(128,'a')
sage: a = F.gen()
sage: E = EllipticCurve([1,0,0,0,(a**6+a**4+a**2+a)])
sage: x = polygen(F)
sage: ker = (x^6 + (a^6 + a^5 + a^4 + a^3 + a^2 + a)*x^5 + (a^6 + a^5 + a^2 + 1)*x^4 + (a^6 + a^5 + a^4 + a^3 + a^2 + 1)*x^3 + (a^6 + a^3 + a)*x^2 + (a^4 + a^3 + 1)*x + a^5 + a^4 + a)
sage: E.isogeny(ker)
Isogeny of degree 13 from Elliptic Curve defined by y^2 + x*y = x^3 + (a^6+a^4+a^2+a) over Finite Field in a of size 2^7 to Elliptic Curve defined by y^2 + x*y = x^3 + (a^6+a^5+a^4+a^3+a^2+a)*x + (a^5+a^3) over Finite Field in a of size 2^7
"""
a1, a2, a3, a4, a6 = E.ainvs()
b2, b4, _, _ = E.b_invariants()
n = psi.degree()
d = 2 * n + 1
x, y = self.__mpoly_ring.gens()
psi_2 = 2 * y + a1 * x + a3
psi_coeffs = psi.list()
if (0 < n):
s1 = -psi_coeffs[n - 1]
else:
s1 = 0
psi_prpr = 0
cur_x_pow = 1
        # Note: we now compute the "derivatives" of psi; these are not
        # actually the derivatives.  Furthermore, the formulas in
        # Kohel's thesis are wrong; the correct formulas are coded
        # below.
from sage.arith.all import binomial
for j in range(n - 1):
psi_prpr += binomial(j+2, 2) * psi_coeffs[(j+2)] * cur_x_pow
cur_x_pow = x * cur_x_pow
psi_prprpr = 0
cur_x_pow = 1
for j in range(n - 2):
psi_prprpr += (3 * binomial(j+3,3)) * psi_coeffs[(j+3)] * cur_x_pow
cur_x_pow = x * cur_x_pow
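        # psi_prpr and psi_prprpr play the role of (one half of) the second
        # and third derivatives of psi: they are built from binomial
        # coefficients rather than by repeated differentiation, so they
        # remain meaningful in characteristic 2, where the naive second
        # derivative vanishes identically.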
omega = phi_pr*psi*y - phi*psi_pr*psi_2 + \
((a1*x + a3)*(psi_2**2)*(psi_prpr*psi_pr-psi_prprpr*psi) + \
(a1*psi_2**2 - 3*(a1*x + a3)*(6*x**2 + b2*x + b4))*psi_prpr*psi + \
(a1*x**3 + 3*a3*x**2 + (2*a2*a3 - a1*a4)*x + (a3*a4 - 2*a1*a6))*psi_pr**2 + \
(-(3*a1*x**2 + 6*a3*x + (-a1*a4 + 2*a2*a3)) + \
(a1*x + a3)*(d*x - 2*s1) )*psi_pr*psi + (a1*s1 + a3*n)*psi**2)*psi
return omega
def __compute_via_kohel_numeric(self, xP, yP):
r"""
Private function that computes the image of a point under this
isogeny, using Kohel's formulas.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3)
sage: P = E((0,1)); phi(P)
(2 : 0 : 1)
sage: P = E((1,1)); phi(P)
(0 : 1 : 0)
sage: phi._EllipticCurveIsogeny__compute_via_kohel_numeric(0, 1)
(2, 0)
sage: phi._EllipticCurveIsogeny__compute_via_kohel_numeric(1, 1)
(0 : 1 : 0)
"""
# first check if this point is in the kernel:
if(0 == self.__inner_kernel_polynomial(x=xP)):
return self.__intermediate_codomain(0)
(xP_out, yP_out) = self.__compute_via_kohel(xP,yP)
# xP_out and yP_out do not always get evaluated to field
# elements but rather constant polynomials, so we do some
# explicit casting
return (self.__base_field(xP_out), self.__base_field(yP_out))
def __compute_via_kohel(self, xP, yP):
r"""
Private function that applies Kohel's formulas.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3)
sage: P = E((0,1)); phi(P)
(2 : 0 : 1)
sage: phi.rational_maps()
((x^3 - 2*x^2 + 3*x + 2)/(x^2 - 2*x + 1), (x^3*y - 3*x^2*y + x*y)/(x^3 - 3*x^2 + 3*x - 1))
sage: phi._EllipticCurveIsogeny__compute_via_kohel(0,1)
(2, 0)
sage: R.<x,y> = GF(7)[]
sage: phi._EllipticCurveIsogeny__compute_via_kohel(x,y)
((x^3 - 2*x^2 + 3*x + 2)/(x^2 - 2*x + 1), (x^3*y - 3*x^2*y + x*y)/(x^3 - 3*x^2 + 3*x - 1))
"""
a = self.__phi(xP)
b = self.__omega(xP, yP)
c = self.__psi(xP)
cc = self.__mpoly_ring(c)
return (a/c**2, b/cc**3)
def __initialize_rational_maps_via_kohel(self):
r"""
Private function that computes and initializes the rational
maps of this isogeny.
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3)
sage: phi.rational_maps()
((x^3 - 2*x^2 + 3*x + 2)/(x^2 - 2*x + 1), (x^3*y - 3*x^2*y + x*y)/(x^3 - 3*x^2 + 3*x - 1))
sage: phi._EllipticCurveIsogeny__initialize_rational_maps_via_kohel()
((x^3 + 5*x^2 + 3*x + 2)/(x^2 + 5*x + 1), (x^3*y - 3*x^2*y + x*y)/(x^3 - 3*x^2 + 3*x - 1))
"""
x = self.__poly_ring.gen()
y = self.__mpoly_ring.gen(1)
return self.__compute_via_kohel(x,y)
#
# Kohel's formula computing the codomain curve
#
def __compute_E2_via_kohel(self):
r"""
Private function that computes and initializes the codomain of
        the isogeny (via Kohel's formulas).
EXAMPLES:
These examples inherently exercise this private function::
sage: R.<x> = GF(7)[]
sage: E = EllipticCurve(GF(7), [0,-1,0,0,1])
sage: phi = EllipticCurveIsogeny(E, x+6, degree=3)
sage: phi.codomain()
Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 4*x + 2 over Finite Field of size 7
sage: phi._EllipticCurveIsogeny__compute_E2_via_kohel()
Elliptic Curve defined by y^2 = x^3 + 6*x^2 + 4*x + 2 over Finite Field of size 7
"""
v = self.__v
w = self.__w
return compute_codomain_formula(self.__E1, v,w)
#
# public isogeny methods
#
def degree(self):
r"""
Returns the degree of this isogeny.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.degree()
2
sage: phi = EllipticCurveIsogeny(E, [0,1,0,1])
sage: phi.degree()
4
sage: E = EllipticCurve(GF(31), [1,0,0,1,2])
sage: phi = EllipticCurveIsogeny(E, [17, 1])
sage: phi.degree()
3
"""
return self.__degree
def rational_maps(self):
r"""
Return the pair of rational maps defining this isogeny.
.. NOTE::
Both components are returned as elements of the function
field `F(x,y)` in two variables over the base field `F`,
though the first only involves `x`. To obtain the
`x`-coordinate function as a rational function in `F(x)`,
use :meth:`x_rational_map`.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,2,0,1,-1])
sage: phi = EllipticCurveIsogeny(E, [1])
sage: phi.rational_maps()
(x, y)
sage: E = EllipticCurve(GF(17), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.rational_maps()
((x^2 + 3)/x, (x^2*y - 3*y)/x^2)
"""
if (not self.__rational_maps_initialized):
self.__initialize_rational_maps()
return (self.__xyfield(self.__X_coord_rational_map),
self.__Y_coord_rational_map)
def x_rational_map(self):
r"""
Return the rational map giving the `x`-coordinate of this isogeny.
.. NOTE::
This function returns the `x`-coordinate component of the
isogeny as a rational function in `F(x)`, where `F` is the
base field. To obtain both coordinate functions as
elements of the function field `F(x,y)` in two variables,
use :meth:`rational_maps`.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,2,0,1,-1])
sage: phi = EllipticCurveIsogeny(E, [1])
sage: phi.x_rational_map()
x
sage: E = EllipticCurve(GF(17), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.x_rational_map()
(x^2 + 3)/x
"""
if (not self.__rational_maps_initialized):
self.__initialize_rational_maps()
return self.__X_coord_rational_map
def is_separable(self):
r"""
Return whether or not this isogeny is separable.
.. NOTE::
This function always returns ``True`` as currently this
class only implements separable isogenies.
EXAMPLES::
sage: E = EllipticCurve(GF(17), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.is_separable()
True
sage: E = EllipticCurve('11a1')
sage: phi = EllipticCurveIsogeny(E, E.torsion_points())
sage: phi.is_separable()
True
"""
return self.__separable
def kernel_polynomial(self):
r"""
Return the kernel polynomial of this isogeny.
EXAMPLES::
sage: E = EllipticCurve(QQ, [0,0,0,2,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.kernel_polynomial()
x
sage: E = EllipticCurve('11a1')
sage: phi = EllipticCurveIsogeny(E, E.torsion_points())
sage: phi.kernel_polynomial()
x^2 - 21*x + 80
sage: E = EllipticCurve(GF(17), [1,-1,1,-1,1])
sage: phi = EllipticCurveIsogeny(E, [1])
sage: phi.kernel_polynomial()
1
sage: E = EllipticCurve(GF(31), [0,0,0,3,0])
sage: phi = EllipticCurveIsogeny(E, [0,3,0,1])
sage: phi.kernel_polynomial()
x^3 + 3*x
"""
if self.__kernel_polynomial is None:
self.__init_kernel_polynomial()
return self.__kernel_polynomial
def set_pre_isomorphism(self, preWI):
r"""
Modify this isogeny by precomposing with a Weierstrass isomorphism.
EXAMPLES::
sage: E = EllipticCurve(GF(31), [1,1,0,1,-1])
sage: R.<x> = GF(31)[]
sage: f = x^3 + 9*x^2 + x + 30
sage: phi = EllipticCurveIsogeny(E, f)
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(isom)
sage: phi.rational_maps()
((-6*x^4 - 3*x^3 + 12*x^2 + 10*x - 1)/(x^3 + x - 12), (3*x^7 + x^6*y - 14*x^6 - 3*x^5 + 5*x^4*y + 7*x^4 + 8*x^3*y - 8*x^3 - 5*x^2*y + 5*x^2 - 14*x*y + 14*x - 6*y - 6)/(x^6 + 2*x^4 + 7*x^3 + x^2 + 7*x - 11))
sage: phi(Epr((0,22)))
(13 : 21 : 1)
sage: phi(Epr((3,7)))
(14 : 17 : 1)
sage: E = EllipticCurve(GF(29), [0,0,0,1,0])
sage: R.<x> = GF(29)[]
sage: f = x^2 + 5
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 29 to Elliptic Curve defined by y^2 = x^3 + 20*x over Finite Field of size 29
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: inv_isom = WeierstrassIsomorphism(E, (1,-2,5,10))
sage: Epr = inv_isom.codomain().codomain()
sage: isom = Epr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(isom); phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 + 10*x*y + 20*y = x^3 + 27*x^2 + 6 over Finite Field of size 29 to Elliptic Curve defined by y^2 = x^3 + 20*x over Finite Field of size 29
sage: phi(Epr((12,1)))
(26 : 0 : 1)
sage: phi(Epr((2,9)))
(0 : 0 : 1)
sage: phi(Epr((21,12)))
(3 : 0 : 1)
sage: phi.rational_maps()[0]
(x^5 - 10*x^4 - 6*x^3 - 7*x^2 - x + 3)/(x^4 - 8*x^3 + 5*x^2 - 14*x - 6)
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = EllipticCurveIsogeny(E, f); phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(isom)
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 = x^3 - 13392*x - 1080432 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
sage: phi(Epr((168,1188)))
(0 : 1 : 0)
"""
        if not isinstance(preWI, WeierstrassIsomorphism):
            raise ValueError("Invalid parameter: isomorphism must be of type Weierstrass isomorphism.")
        WIdom = preWI.domain().codomain()
        WIcod = preWI.codomain().codomain()
        if (self.__E1 != WIcod):
            raise ValueError("Invalid parameter: isomorphism must have codomain curve equal to this isogeny's domain.")
if (self.__pre_isomorphism is None):
isom = preWI
domain = WIdom
else:
isom = self.__pre_isomorphism*preWI
domain = WIdom
self.__clear_cached_values()
self.__set_pre_isomorphism(domain, isom)
return
def set_post_isomorphism(self, postWI):
r"""
Modify this isogeny by postcomposing with a Weierstrass isomorphism.
EXAMPLES::
sage: E = EllipticCurve(j=GF(31)(0))
sage: R.<x> = GF(31)[]
sage: phi = EllipticCurveIsogeny(E, x+18)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(), (6,8,10,12)))
sage: phi
Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 31 to Elliptic Curve defined by y^2 + 24*x*y + 7*y = x^3 + 22*x^2 + 16*x + 20 over Finite Field of size 31
sage: E = EllipticCurve(j=GF(47)(0))
sage: f = E.torsion_polynomial(3)/3
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: post_isom = E2.isomorphism_to(E)
sage: phi.set_post_isomorphism(post_isom)
sage: phi.rational_maps() == E.multiplication_by_m(3)
False
sage: phi.switch_sign()
sage: phi.rational_maps() == E.multiplication_by_m(3)
True
Example over a number field::
sage: R.<x> = QQ[]
sage: K.<a> = NumberField(x^2 + 2)
sage: E = EllipticCurve(j=K(1728))
sage: ker_list = E.torsion_points()
sage: phi = EllipticCurveIsogeny(E, ker_list)
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: post_isom = WeierstrassIsomorphism(phi.codomain(), (a,2,3,5))
sage: phi
Isogeny of degree 4 from Elliptic Curve defined by y^2 = x^3 + x over Number Field in a with defining polynomial x^2 + 2 to Elliptic Curve defined by y^2 = x^3 + (-44)*x + 112 over Number Field in a with defining polynomial x^2 + 2
"""
        if not isinstance(postWI, WeierstrassIsomorphism):
            raise ValueError("Invalid parameter: isomorphism must be of type Weierstrass isomorphism.")
        WIdom = postWI.domain().codomain()
        WIcod = postWI.codomain().codomain()
        if (self.__E2 != WIdom):
            raise ValueError("Invalid parameter: isomorphism must have domain curve equal to this isogeny's codomain.")
if (self.__post_isomorphism is None):
isom = postWI
codomain = WIcod
else:
isom = postWI*self.__post_isomorphism
codomain = WIcod
self.__clear_cached_values()
self.__set_post_isomorphism(codomain, isom)
return
def get_pre_isomorphism(self):
r"""
Return the pre-isomorphism of this isogeny, or ``None``.
EXAMPLES::
sage: E = EllipticCurve(GF(31), [1,1,0,1,-1])
sage: R.<x> = GF(31)[]
sage: f = x^3 + 9*x^2 + x + 30
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi.get_post_isomorphism()
sage: Epr = E.short_weierstrass_model()
sage: isom = Epr.isomorphism_to(E)
sage: phi.set_pre_isomorphism(isom)
sage: isom == phi.get_pre_isomorphism()
True
sage: E = EllipticCurve(GF(83), [1,0,1,1,0])
sage: R.<x> = GF(83)[]; f = x+24
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: phi2 = EllipticCurveIsogeny(E, None, E2, 2)
sage: phi2.get_pre_isomorphism()
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + x over Finite Field of size 83
To: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 62*x + 74 over Finite Field of size 83
Via: (u,r,s,t) = (1, 76, 41, 3)
"""
return self.__pre_isomorphism
def get_post_isomorphism(self):
r"""
Return the post-isomorphism of this isogeny, or ``None``.
EXAMPLES::
sage: E = EllipticCurve(j=GF(31)(0))
sage: R.<x> = GF(31)[]
sage: phi = EllipticCurveIsogeny(E, x+18)
sage: phi.get_post_isomorphism()
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: isom = WeierstrassIsomorphism(phi.codomain(), (6,8,10,12))
sage: phi.set_post_isomorphism(isom)
sage: isom == phi.get_post_isomorphism()
True
sage: E = EllipticCurve(GF(83), [1,0,1,1,0])
sage: R.<x> = GF(83)[]; f = x+24
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: phi2 = EllipticCurveIsogeny(E, None, E2, 2)
sage: phi2.get_post_isomorphism()
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 65*x + 69 over Finite Field of size 83
To: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + 16 over Finite Field of size 83
Via: (u,r,s,t) = (1, 7, 42, 42)
"""
return self.__post_isomorphism
def switch_sign(self):
r"""
Compose this isogeny with `[-1]` (negation).
EXAMPLES::
sage: E = EllipticCurve(GF(23), [0,0,0,1,0])
sage: f = E.torsion_polynomial(3)/3
sage: phi = EllipticCurveIsogeny(E, f, E)
sage: phi.rational_maps() == E.multiplication_by_m(3)
False
sage: phi.switch_sign()
sage: phi.rational_maps() == E.multiplication_by_m(3)
True
sage: E = EllipticCurve(GF(17), [-2, 3, -5, 7, -11])
sage: R.<x> = GF(17)[]
sage: f = x+6
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 + 15*x*y + 12*y = x^3 + 3*x^2 + 7*x + 6 over Finite Field of size 17 to Elliptic Curve defined by y^2 + 15*x*y + 12*y = x^3 + 3*x^2 + 4*x + 8 over Finite Field of size 17
sage: phi.rational_maps()
((x^2 + 6*x + 4)/(x + 6), (x^2*y - 5*x*y + 8*x - 2*y)/(x^2 - 5*x + 2))
sage: phi.switch_sign()
sage: phi
Isogeny of degree 2 from Elliptic Curve defined by y^2 + 15*x*y + 12*y = x^3 + 3*x^2 + 7*x + 6 over Finite Field of size 17 to Elliptic Curve defined by y^2 + 15*x*y + 12*y = x^3 + 3*x^2 + 4*x + 8 over Finite Field of size 17
sage: phi.rational_maps()
((x^2 + 6*x + 4)/(x + 6),
(2*x^3 - x^2*y - 5*x^2 + 5*x*y - 4*x + 2*y + 7)/(x^2 - 5*x + 2))
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = EllipticCurveIsogeny(E, f)
sage: (xmap1, ymap1) = phi.rational_maps()
sage: phi.switch_sign()
sage: (xmap2, ymap2) = phi.rational_maps()
sage: xmap1 == xmap2
True
sage: ymap1 == -ymap2 - E.a1()*xmap2 - E.a3()
True
sage: K.<a> = NumberField(x^2 + 1)
sage: E = EllipticCurve(K, [0,0,0,1,0])
sage: R.<x> = K[]
sage: phi = EllipticCurveIsogeny(E, x-a)
sage: phi.rational_maps()
((x^2 + (-a)*x - 2)/(x + (-a)), (x^2*y + (-2*a)*x*y + y)/(x^2 + (-2*a)*x - 1))
sage: phi.switch_sign()
sage: phi.rational_maps()
((x^2 + (-a)*x - 2)/(x + (-a)), (-x^2*y + (2*a)*x*y - y)/(x^2 + (-2*a)*x - 1))
"""
self.set_post_isomorphism(WeierstrassIsomorphism(self.__E2, (-1,0,-self.__E2.a1(),-self.__E2.a3())))
def is_normalized(self, via_formal=True, check_by_pullback=True):
r"""
Return whether this isogeny is normalized.
.. NOTE::
An isogeny `\varphi\colon E\to E_2` between two given
Weierstrass equations is said to be normalized if the
           constant `c` is `1` in `\varphi^*(\omega_2) = c\cdot\omega`,
           where `\omega` and `\omega_2` are the invariant
differentials on `E` and `E_2` corresponding to the given
equation.
INPUT:
- ``via_formal`` - (default: ``True``) If ``True`` it simply
checks if the leading term of the formal series is
1. Otherwise it uses a deprecated algorithm involving the
second optional argument.
- ``check_by_pullback`` - (default:``True``) Deprecated.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: E = EllipticCurve(GF(7), [0,0,0,1,0])
sage: R.<x> = GF(7)[]
sage: phi = EllipticCurveIsogeny(E, x)
sage: phi.is_normalized()
True
sage: isom = WeierstrassIsomorphism(phi.codomain(), (3, 0, 0, 0))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
False
sage: isom = WeierstrassIsomorphism(phi.codomain(), (5, 0, 0, 0))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
sage: isom = WeierstrassIsomorphism(phi.codomain(), (1, 1, 1, 1))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
sage: F = GF(2^5, 'alpha'); alpha = F.gen()
sage: E = EllipticCurve(F, [1,0,1,1,1])
sage: R.<x> = F[]
sage: phi = EllipticCurveIsogeny(E, x+1)
sage: isom = WeierstrassIsomorphism(phi.codomain(), (alpha, 0, 0, 0))
sage: phi.is_normalized()
True
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
False
sage: isom = WeierstrassIsomorphism(phi.codomain(), (1/alpha, 0, 0, 0))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
sage: isom = WeierstrassIsomorphism(phi.codomain(), (1, 1, 1, 1))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^3 - x^2 - 10*x - 79/4
sage: phi = EllipticCurveIsogeny(E, f)
sage: isom = WeierstrassIsomorphism(phi.codomain(), (2, 0, 0, 0))
sage: phi.is_normalized()
True
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
False
sage: isom = WeierstrassIsomorphism(phi.codomain(), (1/2, 0, 0, 0))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
sage: isom = WeierstrassIsomorphism(phi.codomain(), (1, 1, 1, 1))
sage: phi.set_post_isomorphism(isom)
sage: phi.is_normalized()
True
"""
# easy algorithm using the formal expansion.
if via_formal:
phi_formal = self.formal(prec=5)
return phi_formal[1] == 1
        # This is the old algorithm; it should be deprecated.
check_prepost_isomorphism = False
f_normalized = True
if (check_by_pullback):
(Xmap, Ymap) = self.rational_maps()
E1 = self.__E1
E2 = self.__E2
a1 = E1.a1()
a3 = E1.a3()
a1pr = E2.a1()
a3pr = E2.a3()
x, y = self.__mpoly_ring.gens()
Xmap_pr = Xmap.derivative(x)
domain_inv_diff = 1/(2*y + a1*x + a3)
codomain_inv_diff = Xmap_pr/(2*Ymap + a1pr*Xmap + a3pr)
inv_diff_quo = domain_inv_diff/codomain_inv_diff
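            # The isogeny is normalized exactly when the invariant
            # differential of the codomain pulls back to that of the
            # domain, i.e. when this quotient equals 1.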
if (1 == inv_diff_quo):
f_normalized = True
else:
                # For some reason, in certain cases, when the isogeny
                # is pre- or post-composed with a translation, the
                # resulting rational functions are too complicated for
                # Sage to simplify down to a constant.  In this case we
                # do some cheating: if the quotient is visibly a
                # constant (necessarily != 1 here), the isogeny is not
                # normalized; otherwise we fall back to inspecting the
                # pre- and post-isomorphisms below.
                if (inv_diff_quo.numerator().is_constant() and inv_diff_quo.denominator().is_constant()):
f_normalized = False
else:
check_prepost_isomorphism = True
else:
check_prepost_isomorphism = True
        # If we skipped the check via the pullback of the invariant
        # differential, or if that check was inconclusive, we explicitly
        # look at the pre- and post-isomorphisms and test whether either
        # has a scaling factor other than 1 (a pure translation is
        # harmless).  NOTE: this only works because the algorithms we use
        # always compute a separable normalized isogeny; if this changes,
        # this check will no longer be correct.
#
if (check_prepost_isomorphism):
post_isom = self.__post_isomorphism
if (post_isom is not None):
if (1 == self.__base_field(post_isom.u)):
f_post_normalized = True
else:
f_post_normalized = False
else:
f_post_normalized = True
pre_isom = self.__pre_isomorphism
if (pre_isom is not None):
if (1 == self.__base_field(pre_isom.u)):
f_pre_normalized = True
else:
f_pre_normalized = False
else:
f_pre_normalized = True
f_normalized = f_pre_normalized and f_post_normalized
return f_normalized
def dual(self):
r"""
Return the isogeny dual to this isogeny.
.. NOTE::
If `\varphi\colon E \to E_2` is the given isogeny and `n`
is its degree, then the dual is by definition the unique
isogeny `\hat\varphi\colon E_2\to E` such that the
compositions `\hat\varphi\circ\varphi` and
`\varphi\circ\hat\varphi` are the multiplication-by-`n`
maps on `E` and `E_2`, respectively.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 - 21*x + 80
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi_hat = phi.dual()
sage: phi_hat.domain() == phi.codomain()
True
sage: phi_hat.codomain() == phi.domain()
True
sage: (X, Y) = phi.rational_maps()
sage: (Xhat, Yhat) = phi_hat.rational_maps()
sage: Xm = Xhat.subs(x=X, y=Y)
sage: Ym = Yhat.subs(x=X, y=Y)
sage: (Xm, Ym) == E.multiplication_by_m(5)
True
sage: E = EllipticCurve(GF(37), [0,0,0,1,8])
sage: R.<x> = GF(37)[]
sage: f = x^3 + x^2 + 28*x + 33
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi_hat = phi.dual()
sage: phi_hat.codomain() == phi.domain()
True
sage: phi_hat.domain() == phi.codomain()
True
sage: (X, Y) = phi.rational_maps()
sage: (Xhat, Yhat) = phi_hat.rational_maps()
sage: Xm = Xhat.subs(x=X, y=Y)
sage: Ym = Yhat.subs(x=X, y=Y)
sage: (Xm, Ym) == E.multiplication_by_m(7)
True
sage: E = EllipticCurve(GF(31), [0,0,0,1,8])
sage: R.<x> = GF(31)[]
sage: f = x^2 + 17*x + 29
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi_hat = phi.dual()
sage: phi_hat.codomain() == phi.domain()
True
sage: phi_hat.domain() == phi.codomain()
True
sage: (X, Y) = phi.rational_maps()
sage: (Xhat, Yhat) = phi_hat.rational_maps()
sage: Xm = Xhat.subs(x=X, y=Y)
sage: Ym = Yhat.subs(x=X, y=Y)
sage: (Xm, Ym) == E.multiplication_by_m(5)
True
Test for :trac:`23928`::
sage: E = EllipticCurve(j=GF(431**2)(4))
sage: phi = E.isogeny(E.lift_x(0))
sage: phi.dual()
Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 427*x over Finite Field in z2 of size 431^2 to Elliptic Curve defined by y^2 = x^3 + x over Finite Field in z2 of size 431^2
Test (for :trac:`7096`)::
sage: E = EllipticCurve('11a1')
sage: phi = E.isogeny(E(5,5))
sage: phi.dual().dual() == phi
True
sage: k = GF(103)
sage: E = EllipticCurve(k,[11,11])
sage: phi = E.isogeny(E(4,4))
sage: phi
Isogeny of degree 5 from Elliptic Curve defined by y^2 = x^3 + 11*x + 11 over Finite Field of size 103 to Elliptic Curve defined by y^2 = x^3 + 25*x + 80 over Finite Field of size 103
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism
sage: phi.set_post_isomorphism(WeierstrassIsomorphism(phi.codomain(),(5,0,1,2)))
sage: phi.dual().dual() == phi
True
sage: E = EllipticCurve(GF(103),[1,0,0,1,-1])
sage: phi = E.isogeny(E(60,85))
sage: phi.dual()
Isogeny of degree 7 from Elliptic Curve defined by y^2 + x*y = x^3 + 84*x + 34 over Finite Field of size 103 to Elliptic Curve defined by y^2 + x*y = x^3 + x + 102 over Finite Field of size 103
        Check that :trac:`17293` is fixed::
sage: k.<s> = QuadraticField(2)
sage: E = EllipticCurve(k, [-3*s*(4 + 5*s), 2*s*(2 + 14*s + 11*s^2)])
sage: phi = E.isogenies_prime_degree(3)[0]
sage: (-phi).dual() == -(phi.dual())
True
sage: phi._EllipticCurveIsogeny__clear_cached_values() # forget the dual
sage: -(phi.dual()) == (-phi).dual()
True
"""
if (self.__base_field.characteristic() in [2,3]):
raise NotImplementedError("Computation of dual isogenies not yet implemented in characteristics 2 and 3")
if (self.__dual is not None):
return self.__dual
# trac 7096
(E1, E2pr, pre_isom, post_isom) = compute_intermediate_curves(self.codomain(), self.domain())
F = self.__base_field
d = self.__degree
# trac 7096
if F(d) == 0:
raise NotImplementedError("The dual isogeny is not separable: only separable isogenies are currently implemented")
# trac 7096
# this should take care of the case when the isogeny is not normalized.
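        # Heuristically: if phi has leading formal coefficient u, the dual
        # should have leading coefficient d/u so that the composition is
        # multiplication by d; the scaling isomorphism by u/F(d) below is
        # what arranges this, and any leftover unit is corrected by an
        # automorphism further down.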
u = self.formal()[1]
isom = WeierstrassIsomorphism(E2pr, (u/F(d), 0, 0, 0))
E2 = isom.codomain().codomain()
pre_isom = self.__E2.isomorphism_to(E1)
post_isom = E2.isomorphism_to(self.__E1)
phi_hat = EllipticCurveIsogeny(E1, None, E2, d)
phi_hat.set_pre_isomorphism(pre_isom)
phi_hat.set_post_isomorphism(post_isom)
phi_hat.__perform_inheritance_housekeeping()
assert phi_hat.codomain() == self.domain()
        # trac 7096: this adjusts a posteriori the automorphism on
        # the codomain of the dual isogeny.  We used _a_ Weierstrass
        # isomorphism to get to the original curve, but we may have to
        # change it by an automorphism.  We impose the condition that
        # the composition has the degree as leading coefficient in the
        # formal expansion.
phi_sc = self.formal()[1]
phihat_sc = phi_hat.formal()[1]
sc = phi_sc * phihat_sc/F(d)
if sc == 0:
raise RuntimeError("Bug in computing dual isogeny: sc = 0")
if sc != 1:
auts = self.__E1.automorphisms()
aut = [a for a in auts if a.u == sc]
if len(aut) != 1:
raise ValueError("There is a bug in dual().")
phi_hat.set_post_isomorphism(aut[0])
self.__dual = phi_hat
return phi_hat
def formal(self,prec=20):
r"""
Return the formal isogeny as a power series in the variable
`t=-x/y` on the domain curve.
INPUT:
- ``prec`` - (default = 20), the precision with which the
computations in the formal group are carried out.
EXAMPLES::
sage: E = EllipticCurve(GF(13),[1,7])
sage: phi = E.isogeny(E(10,4))
sage: phi.formal()
t + 12*t^13 + 2*t^17 + 8*t^19 + 2*t^21 + O(t^23)
sage: E = EllipticCurve([0,1])
sage: phi = E.isogeny(E(2,3))
sage: phi.formal(prec=10)
t + 54*t^5 + 255*t^7 + 2430*t^9 + 19278*t^11 + O(t^13)
sage: E = EllipticCurve('11a2')
sage: R.<x> = QQ[]
sage: phi = E.isogeny(x^2 + 101*x + 12751/5)
sage: phi.formal(prec=7)
t - 2724/5*t^5 + 209046/5*t^7 - 4767/5*t^8 + 29200946/5*t^9 + O(t^10)
"""
Eh = self.__E1.formal()
f, g = self.rational_maps()
xh = Eh.x(prec=prec)
if xh.valuation() != -2:
raise RuntimeError("xh has valuation %s (should be -2)" % xh.valuation())
yh = Eh.y(prec=prec)
if yh.valuation() != -3:
raise RuntimeError("yh has valuation %s (should be -3)" % yh.valuation())
fh = f(xh,yh)
if fh.valuation() != -2:
raise RuntimeError("fh has valuation %s (should be -2)" % fh.valuation())
gh = g(xh,yh)
if gh.valuation() != -3:
raise RuntimeError("gh has valuation %s (should be -3)" % gh.valuation())
th = -fh/gh
if th.valuation() != 1:
raise RuntimeError("th has valuation %s (should be +1)" % th.valuation())
return th
#
# Overload Morphism methods that we want to
#
def is_injective(self):
r"""
Return ``True`` if and only if this isogeny has trivial
kernel.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 + x - 29/5
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi.is_injective()
False
sage: phi = EllipticCurveIsogeny(E, R(1))
sage: phi.is_injective()
True
sage: F = GF(7)
sage: E = EllipticCurve(j=F(0))
sage: phi = EllipticCurveIsogeny(E, [ E((0,-1)), E((0,1))])
sage: phi.is_injective()
False
sage: phi = EllipticCurveIsogeny(E, E(0))
sage: phi.is_injective()
True
"""
if (1 < self.__degree): return False
return True
def is_surjective(self):
r"""
Return ``True`` if and only if this isogeny is surjective.
.. NOTE::
This function always returns ``True``, as a non-constant
map of algebraic curves must be surjective, and this class
does not model the constant `0` isogeny.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]
sage: f = x^2 + x - 29/5
sage: phi = EllipticCurveIsogeny(E, f)
sage: phi.is_surjective()
True
sage: E = EllipticCurve(GF(7), [0,0,0,1,0])
sage: phi = EllipticCurveIsogeny(E, E((0,0)))
sage: phi.is_surjective()
True
sage: F = GF(2^5, 'omega')
sage: E = EllipticCurve(j=F(0))
sage: R.<x> = F[]
sage: phi = EllipticCurveIsogeny(E, x)
sage: phi.is_surjective()
True
"""
return True
def is_zero(self):
r"""
Return whether this isogeny is zero.
.. NOTE::
Currently this class does not allow zero isogenies, so this
           function will always return False.
EXAMPLES::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = EllipticCurveIsogeny(E, [ E((0,1)), E((0,-1))])
sage: phi.is_zero()
False
"""
return self.degree().is_zero()
def post_compose(self, left):
r"""
Return the post-composition of this isogeny with ``left``.
EXAMPLES::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = EllipticCurveIsogeny(E, [ E((0,1)), E((0,-1))])
sage: phi.post_compose(phi)
Traceback (most recent call last):
...
NotImplementedError: post-composition of isogenies not yet implemented
"""
raise NotImplementedError("post-composition of isogenies not yet implemented")
def pre_compose(self, right):
r"""
Return the pre-composition of this isogeny with ``right``.
EXAMPLES::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = EllipticCurveIsogeny(E, [ E((0,1)), E((0,-1))])
sage: phi.pre_compose(phi)
Traceback (most recent call last):
...
NotImplementedError: pre-composition of isogenies not yet implemented
"""
raise NotImplementedError("pre-composition of isogenies not yet implemented")
def n(self):
r"""
        Numerical approximation inherited from Map (through Morphism);
        nonsensical for isogenies.
EXAMPLES::
sage: E = EllipticCurve(j=GF(7)(0))
sage: phi = EllipticCurveIsogeny(E, [ E((0,1)), E((0,-1))])
sage: phi.n()
Traceback (most recent call last):
...
NotImplementedError: Numerical approximations do not make sense for Elliptic Curve Isogenies
"""
raise NotImplementedError("Numerical approximations do not make sense for Elliptic Curve Isogenies")
def compute_isogeny_starks(E1, E2, ell):
r"""
    Return the kernel polynomial of an isogeny of degree ``ell``
between ``E1`` and ``E2``.
INPUT:
- ``E1`` - an elliptic curve in short Weierstrass form.
- ``E2`` - an elliptic curve in short Weierstrass form.
- ``ell`` - the degree of the isogeny from E1 to E2.
OUTPUT:
polynomial over the field of definition of ``E1``, ``E2``, that is
the kernel polynomial of the isogeny from ``E1`` to ``E2``.
.. NOTE::
There must be a degree ``ell``, separable, normalized cyclic
isogeny from ``E1`` to ``E2``, or an error will be raised.
ALGORITHM:
    This function uses Stark's algorithm as presented in Section 6.2 of
[BMSS]_.
.. NOTE::
As published in [BMSS]_, the algorithm is incorrect, and a
correct version (with slightly different notation) can be found
in [M09]_. The algorithm originates in [S72]_.
REFERENCES:
    .. [BMSS] Bostan, Morain, Salvy, Schost, "Fast algorithms for computing isogenies between elliptic curves".
.. [M09] Moody, "The Diffie-Hellman Problem and Generalization of Verheul's Theorem"
.. [S72] Stark, "Class-numbers of complex quadratic fields."
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_isogeny_starks, compute_sequence_of_maps
sage: E = EllipticCurve(GF(97), [1,0,1,1,0])
sage: R.<x> = GF(97)[]; f = x^5 + 27*x^4 + 61*x^3 + 58*x^2 + 28*x + 21
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: (isom1, isom2, E1pr, E2pr, ker_poly) = compute_sequence_of_maps(E, E2, 11)
sage: compute_isogeny_starks(E1pr, E2pr, 11)
x^10 + 37*x^9 + 53*x^8 + 66*x^7 + 66*x^6 + 17*x^5 + 57*x^4 + 6*x^3 + 89*x^2 + 53*x + 8
sage: E = EllipticCurve(GF(37), [0,0,0,1,8])
sage: R.<x> = GF(37)[]
sage: f = (x + 14) * (x + 30)
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_isogeny_starks(E, E2, 5)
x^4 + 14*x^3 + x^2 + 34*x + 21
sage: f**2
x^4 + 14*x^3 + x^2 + 34*x + 21
sage: E = EllipticCurve(QQ, [0,0,0,1,0])
sage: R.<x> = QQ[]
sage: f = x
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_isogeny_starks(E, E2, 2)
x
"""
K = E1.base_field()
R = PolynomialRing(K, 'x')
x = R.gen()
    wp1 = E1.weierstrass_p(prec=4*ell+4)  # BMSS claim 2*ell is enough, but it is not (see M09)
wp2 = E2.weierstrass_p(prec=4*ell+4)
    # view them as power series in Z = z^2
S = LaurentSeriesRing(K, 'Z')
Z = S.gen()
pe1 = 1/Z
pe2 = 1/Z
for i in range(2 * ell + 1):
pe1 += wp1[2*i] * Z**i
pe2 += wp2[2*i] * Z**i
pe1 = pe1.add_bigoh(2*ell+2)
pe2 = pe2.add_bigoh(2*ell+2)
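    # Stark's algorithm: expand pe2 as a continued fraction in pe1,
    #   pe2 = a_1(pe1) + 1/(a_2(pe1) + 1/( ... )),
    # keeping only the denominators q_n of the convergents via the
    # recurrence q_n = a_n*q_{n-1} + q_{n-2}.  The first q_n of degree
    # ell-1 is, up to a scalar, the denominator of the x-rational map of
    # the isogeny, whose radical is the kernel polynomial.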
n = 1
q = [R(1), R(0)]
T = pe2
while ( q[n].degree() < (ell-1) ):
n += 1
a_n = 0
r = -T.valuation()
while (0 <= r):
t_r = T[-r]
a_n = a_n + t_r * x**r
T = T - t_r*pe1**r
r = -T.valuation()
q_n = a_n*q[n-1] + q[n-2]
q.append(q_n)
if (n == ell+1 or T == 0):
if (T == 0 or T.valuation()<2):
raise ValueError("The two curves are not linked by a cyclic normalized isogeny of degree %s" % ell)
break
T = 1/T
qn = q[n]
qn = (1/qn.leading_coefficient())*qn
return qn
def split_kernel_polynomial(poly):
r"""
Internal helper function for ``compute_isogeny_kernel_polynomial``.
INPUT:
- ``poly`` -- a nonzero univariate polynomial.
OUTPUT:
The maximum separable divisor of ``poly``. If the input is a full
kernel polynomial where the roots which are `x`-coordinates of
points of order greater than 2 have multiplicity 1, the output
will be a polynomial with the same roots, all of multiplicity 1.
EXAMPLES:
The following example implicitly exercises this function::
sage: E = EllipticCurve(GF(37), [0,0,0,1,8])
sage: R.<x> = GF(37)[]
sage: f = (x + 10) * (x + 12) * (x + 16)
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_isogeny_starks
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import split_kernel_polynomial
sage: ker_poly = compute_isogeny_starks(E, E2, 7); ker_poly
x^6 + 2*x^5 + 20*x^4 + 11*x^3 + 36*x^2 + 35*x + 16
sage: ker_poly.factor()
(x + 10)^2 * (x + 12)^2 * (x + 16)^2
sage: poly = split_kernel_polynomial(ker_poly); poly
x^3 + x^2 + 28*x + 33
sage: poly.factor()
(x + 10) * (x + 12) * (x + 16)
"""
from sage.misc.all import prod
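    # The squarefree decomposition writes poly as a product of powers of
    # pairwise coprime squarefree polynomials p^e; taking the product of
    # the bases p (and discarding the exponents) gives a polynomial with
    # the same roots, each of multiplicity 1.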
return prod([p for p,e in poly.squarefree_decomposition()])
def compute_isogeny_kernel_polynomial(E1, E2, ell, algorithm="starks"):
r"""
Return the kernel polynomial of an isogeny of degree ``ell``
between ``E1`` and ``E2``.
INPUT:
- ``E1`` - an elliptic curve in short Weierstrass form.
- ``E2`` - an elliptic curve in short Weierstrass form.
- ``ell`` - the degree of the isogeny from ``E1`` to ``E2``.
- ``algorithm`` - currently only ``starks`` (default) is implemented.
OUTPUT:
polynomial over the field of definition of ``E1``, ``E2``, that is
the kernel polynomial of the isogeny from ``E1`` to ``E2``.
.. NOTE::
If there is no degree ``ell``, cyclic, separable, normalized
isogeny from ``E1`` to ``E2`` then an error will be raised.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_isogeny_kernel_polynomial
sage: E = EllipticCurve(GF(37), [0,0,0,1,8])
sage: R.<x> = GF(37)[]
sage: f = (x + 14) * (x + 30)
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_isogeny_kernel_polynomial(E, E2, 5)
x^2 + 7*x + 13
sage: f
x^2 + 7*x + 13
sage: R.<x> = QQ[]
sage: K.<i> = NumberField(x^2 + 1)
sage: E = EllipticCurve(K, [0,0,0,1,0])
sage: E2 = EllipticCurve(K, [0,0,0,16,0])
sage: compute_isogeny_kernel_polynomial(E, E2, 4)
x^3 + x
"""
return split_kernel_polynomial(compute_isogeny_starks(E1, E2, ell))
def compute_intermediate_curves(E1, E2):
r"""
Return intermediate curves and isomorphisms.
.. NOTE::
This is used so we can compute `\wp` functions from the short
Weierstrass model more easily.
.. WARNING::
The base field must be of characteristic not equal to 2,3.
INPUT:
- ``E1`` - an elliptic curve
- ``E2`` - an elliptic curve
OUTPUT:
    tuple (``intermediate_domain``, ``intermediate_codomain``,
    ``pre_isomorphism``, ``post_isomorphism``):
- ``intermediate_domain``: a short Weierstrass model isomorphic to
``E1``
- ``intermediate_codomain``: a short Weierstrass model isomorphic
to ``E2``
- ``pre_isomorphism``: normalized isomorphism from ``E1`` to
intermediate_domain
- ``post_isomorphism``: normalized isomorphism from
intermediate_codomain to ``E2``
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_intermediate_curves
sage: E = EllipticCurve(GF(83), [1,0,1,1,0])
sage: R.<x> = GF(83)[]; f = x+24
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_intermediate_curves(E, E2)
(Elliptic Curve defined by y^2 = x^3 + 62*x + 74 over Finite Field of size 83,
Elliptic Curve defined by y^2 = x^3 + 65*x + 69 over Finite Field of size 83,
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + x over Finite Field of size 83
To: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 62*x + 74 over Finite Field of size 83
Via: (u,r,s,t) = (1, 76, 41, 3),
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 65*x + 69 over Finite Field of size 83
To: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + 16 over Finite Field of size 83
Via: (u,r,s,t) = (1, 7, 42, 42))
sage: R.<x> = QQ[]
sage: K.<i> = NumberField(x^2 + 1)
sage: E = EllipticCurve(K, [0,0,0,1,0])
sage: E2 = EllipticCurve(K, [0,0,0,16,0])
sage: compute_intermediate_curves(E, E2)
(Elliptic Curve defined by y^2 = x^3 + x over Number Field in i with defining polynomial x^2 + 1,
Elliptic Curve defined by y^2 = x^3 + 16*x over Number Field in i with defining polynomial x^2 + 1,
Generic endomorphism of Abelian group of points on Elliptic Curve defined by y^2 = x^3 + x over Number Field in i with defining polynomial x^2 + 1
Via: (u,r,s,t) = (1, 0, 0, 0),
Generic endomorphism of Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 16*x over Number Field in i with defining polynomial x^2 + 1
Via: (u,r,s,t) = (1, 0, 0, 0))
"""
if (E1.base_ring().characteristic() in [2,3]):
raise NotImplementedError("compute_intermediate_curves is only defined for characteristics not 2 or 3")
# We cannot just use
# E1w = E1.short_weierstrass_model()
# E2w = E2.short_weierstrass_model()
# as the resulting isomorphisms would not be normalised (u=1)
c4, c6 = E1.c_invariants()
E1w = EllipticCurve([0,0,0,-c4/48, -c6/864])
c4, c6 = E2.c_invariants()
E2w = EllipticCurve([0,0,0,-c4/48, -c6/864])
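    # Writing the short model directly from the c-invariants as
    # y^2 = x^3 - (c4/48)*x - (c6/864) ensures that the isomorphism to it
    # from the original curve can be chosen with scale factor u = 1.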
# We cannot even just use pre_iso = E1.isomorphism_to(E1w) since
# it may have u=-1; similarly for E2
urst = [w for w in isomorphisms(E1,E1w) if w[0]==1][0]
pre_iso = WeierstrassIsomorphism(E1,urst,E1w)
urst = [w for w in isomorphisms(E2w,E2) if w[0]==1][0]
post_iso = WeierstrassIsomorphism(E2w,urst,E2)
return (E1w, E2w, pre_iso, post_iso)
def compute_sequence_of_maps(E1, E2, ell):
r"""
Return intermediate curves, isomorphisms and kernel polynomial.
INPUT:
- ``E1``, ``E2`` -- elliptic curves.
- ``ell`` -- a prime such that there is a degree ``ell`` separable
normalized isogeny from ``E1`` to ``E2``.
OUTPUT:
(pre_isom, post_isom, E1pr, E2pr, ker_poly) where:
- ``E1pr`` is an elliptic curve in short Weierstrass form
isomorphic to ``E1``;
- ``E2pr`` is an elliptic curve in short Weierstrass form
isomorphic to ``E2``;
- ``pre_isom`` is a normalised isomorphism from ``E1`` to
``E1pr``;
- ``post_isom`` is a normalised isomorphism from ``E2pr`` to
``E2``;
- ``ker_poly`` is the kernel polynomial of an ``ell``-isogeny from
``E1pr`` to ``E2pr``.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import compute_sequence_of_maps
sage: E = EllipticCurve('11a1')
sage: R.<x> = QQ[]; f = x^2 - 21*x + 80
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_sequence_of_maps(E, E2, 5)
(Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 = x^3 - 31/3*x - 2501/108 over Rational Field
Via: (u,r,s,t) = (1, 1/3, 0, -1/2),
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 = x^3 - 23461/3*x - 28748141/108 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field
Via: (u,r,s,t) = (1, -1/3, 0, 1/2),
Elliptic Curve defined by y^2 = x^3 - 31/3*x - 2501/108 over Rational Field,
Elliptic Curve defined by y^2 = x^3 - 23461/3*x - 28748141/108 over Rational Field,
x^2 - 61/3*x + 658/9)
sage: K.<i> = NumberField(x^2 + 1)
sage: E = EllipticCurve(K, [0,0,0,1,0])
sage: E2 = EllipticCurve(K, [0,0,0,16,0])
sage: compute_sequence_of_maps(E, E2, 4)
(Generic endomorphism of Abelian group of points on Elliptic Curve defined by y^2 = x^3 + x over Number Field in i with defining polynomial x^2 + 1
Via: (u,r,s,t) = (1, 0, 0, 0),
Generic endomorphism of Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 16*x over Number Field in i with defining polynomial x^2 + 1
Via: (u,r,s,t) = (1, 0, 0, 0),
Elliptic Curve defined by y^2 = x^3 + x over Number Field in i with defining polynomial x^2 + 1,
Elliptic Curve defined by y^2 = x^3 + 16*x over Number Field in i with defining polynomial x^2 + 1,
x^3 + x)
sage: E = EllipticCurve(GF(97), [1,0,1,1,0])
sage: R.<x> = GF(97)[]; f = x^5 + 27*x^4 + 61*x^3 + 58*x^2 + 28*x + 21
sage: phi = EllipticCurveIsogeny(E, f)
sage: E2 = phi.codomain()
sage: compute_sequence_of_maps(E, E2, 11)
(Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + x over Finite Field of size 97
To: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 52*x + 31 over Finite Field of size 97
Via: (u,r,s,t) = (1, 8, 48, 44),
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + 41*x + 66 over Finite Field of size 97
To: Abelian group of points on Elliptic Curve defined by y^2 + x*y + y = x^3 + 87*x + 26 over Finite Field of size 97
Via: (u,r,s,t) = (1, 89, 49, 49),
Elliptic Curve defined by y^2 = x^3 + 52*x + 31 over Finite Field of size 97,
Elliptic Curve defined by y^2 = x^3 + 41*x + 66 over Finite Field of size 97,
x^5 + 67*x^4 + 13*x^3 + 35*x^2 + 77*x + 69)
"""
(E1pr, E2pr, pre_isom, post_isom) = compute_intermediate_curves(E1, E2)
ker_poly = compute_isogeny_kernel_polynomial(E1pr, E2pr, ell)
return (pre_isom, post_isom, E1pr, E2pr, ker_poly)
# Utility function for manipulating isogeny degree matrices
def fill_isogeny_matrix(M):
"""
Return the filled isogeny matrix giving all (composite) isogeny degrees, computed from a matrix giving only prime degrees.
INPUT:
- ``M`` -- a square symmetric matrix whose off-diagonal `i`, `j`
entry is a prime `l` if the `i`'th and `j`'th curves have an
`l`-isogeny between them, and `0` otherwise.
OUTPUT:
(matrix) a square matrix with entries `1` on the diagonal; in
general the `i`, `j` entry is `d>0`, where `d` is the minimal
degree of an isogeny from the `i`'th to the `j`'th curve.
EXAMPLES::
sage: M = Matrix([[0, 2, 3, 3, 0, 0], [2, 0, 0, 0, 3, 3], [3, 0, 0, 0, 2, 0], [3, 0, 0, 0, 0, 2], [0, 3, 2, 0, 0, 0], [0, 3, 0, 2, 0, 0]]); M
[0 2 3 3 0 0]
[2 0 0 0 3 3]
[3 0 0 0 2 0]
[3 0 0 0 0 2]
[0 3 2 0 0 0]
[0 3 0 2 0 0]
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import fill_isogeny_matrix
sage: fill_isogeny_matrix(M)
[ 1 2 3 3 6 6]
[ 2 1 6 6 3 3]
[ 3 6 1 9 2 18]
[ 3 6 9 1 18 2]
[ 6 3 2 18 1 9]
[ 6 3 18 2 9 1]
"""
from sage.matrix.all import Matrix
from sage.rings.infinity import Infinity
n = M.nrows()
M0 = copy(M)
for i in range(n):
M0[i,i]=1
    # map zero products (no isogeny recorded for that step) to Infinity so they never win a min()
    def fix(d):
        if d == 0: return Infinity
        return d
    # and map Infinity back to the 0 sentinel when assembling the output matrix
    def fix2(d):
        if d == Infinity: return 0
        return d
    # "min-times" matrix product: each entry is the cheapest two-step degree through any intermediate k
    def pr(M1, M2):
        return Matrix([[fix2(min([fix(M1[i, k] * M2[k, j]) for k in range(n)])) for i in range(n)] for j in range(n)])
M1 = M0
M2 = pr(M0,M1)
while M1!=M2:
M1 = M2
M2 = pr(M0,M1)
return M1
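# A minimal pure-Python sketch of the closure step used by ``fill_isogeny_matrix``:
# repeat the "minimum over k of M0[i][k] * M[k][j]" product until the matrix of
# minimal degrees stabilises.  The helper name and the 3x3 matrix below are made-up
# illustrations only (Infinity stands in for the 0 "no isogeny" sentinel).
def _fill_closure_sketch():
    INF = float('inf')
    M0 = [[1, 2, 3],
          [2, 1, INF],
          [3, INF, 1]]
    n = len(M0)
    def product(A, B):
        # entry (i, j) is the cheapest two-step degree through any intermediate k
        return [[min(A[i][k] * B[k][j] for k in range(n)) for j in range(n)]
                for i in range(n)]
    M1, M2 = M0, product(M0, M0)
    while M1 != M2:
        M1, M2 = M2, product(M0, M2)
    return M1   # [[1, 2, 3], [2, 1, 6], [3, 6, 1]]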
def unfill_isogeny_matrix(M):
"""
Reverses the action of ``fill_isogeny_matrix``.
INPUT:
- ``M`` -- a square symmetric matrix of integers.
OUTPUT:
(matrix) a square symmetric matrix obtained from ``M`` by
replacing non-prime entries with `0`.
EXAMPLES::
sage: M = Matrix([[0, 2, 3, 3, 0, 0], [2, 0, 0, 0, 3, 3], [3, 0, 0, 0, 2, 0], [3, 0, 0, 0, 0, 2], [0, 3, 2, 0, 0, 0], [0, 3, 0, 2, 0, 0]]); M
[0 2 3 3 0 0]
[2 0 0 0 3 3]
[3 0 0 0 2 0]
[3 0 0 0 0 2]
[0 3 2 0 0 0]
[0 3 0 2 0 0]
sage: from sage.schemes.elliptic_curves.ell_curve_isogeny import fill_isogeny_matrix, unfill_isogeny_matrix
sage: M1 = fill_isogeny_matrix(M); M1
[ 1 2 3 3 6 6]
[ 2 1 6 6 3 3]
[ 3 6 1 9 2 18]
[ 3 6 9 1 18 2]
[ 6 3 2 18 1 9]
[ 6 3 18 2 9 1]
sage: unfill_isogeny_matrix(M1)
[0 2 3 3 0 0]
[2 0 0 0 3 3]
[3 0 0 0 2 0]
[3 0 0 0 0 2]
[0 3 2 0 0 0]
[0 3 0 2 0 0]
sage: unfill_isogeny_matrix(M1) == M
True
"""
n = M.nrows()
M1 = copy(M)
zero = Integer(0)
for i in range(n):
M1[i, i] = zero
for j in range(i):
if not M1[i, j].is_prime():
M1[i, j] = zero
M1[j, i] = zero
return M1
| 36.783764
| 632
| 0.567219
|
63b2242d3e00341f402fcb1ddf1e9b21b3ecb7ba
| 2,800
|
py
|
Python
|
pnc_cli/swagger_client/models/singleton.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | null | null | null |
pnc_cli/swagger_client/models/singleton.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | 3
|
2015-06-01T22:12:27.000Z
|
2015-10-11T16:20:11.000Z
|
pnc_cli/swagger_client/models/singleton.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | 5
|
2015-05-28T18:14:36.000Z
|
2018-07-20T07:38:21.000Z
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class Singleton(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Singleton - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'content': 'object'
}
self.attribute_map = {
'content': 'content'
}
self._content = None
@property
def content(self):
"""
Gets the content of this Singleton.
:return: The content of this Singleton.
:rtype: object
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this Singleton.
:param content: The content of this Singleton.
:type: object
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
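# A small illustrative sketch of how this generated model is typically used; the
# payload dict is an arbitrary example value, not anything defined by the API.
def _singleton_usage_sketch():
    instance = Singleton()
    instance.content = {"key": "value"}   # any JSON-serialisable object
    return instance.to_dict()             # -> {'content': {'key': 'value'}}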
| 26.666667
| 77
| 0.577143
|
c0036e6c7b492b9788c40b1295d4658513bfe064
| 5,018
|
py
|
Python
|
docs/conf.py
|
KCRW-org/kcrw.apple_news
|
81c0a402f210e87ae73fe02a3d39b7929c328ceb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
KCRW-org/kcrw.apple_news
|
81c0a402f210e87ae73fe02a3d39b7929c328ceb
|
[
"MIT"
] | 85
|
2020-03-09T22:40:20.000Z
|
2022-03-31T01:10:18.000Z
|
docs/conf.py
|
KCRW-org/kcrw.apple_news
|
81c0a402f210e87ae73fe02a3d39b7929c328ceb
|
[
"MIT"
] | 2
|
2021-05-12T17:37:09.000Z
|
2021-08-14T11:30:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# kcrw.apple_news documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 30 09:17:41 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../src'))
from kcrw import apple_news # NOQA
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kcrw.apple_news'
copyright = u"2020, KCRW"
author = u"Alec Mitchell"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = apple_news.__version__
# The full version, including alpha/beta/rc tags.
release = apple_news.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
autodoc_default_options = {
'member-order': 'bysource',
'special-members': '__init__',
'undoc-members': False,
}
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'kcrw.apple_newsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'kcrw.apple_news.tex',
u'kcrw.apple_news Documentation',
u'Alec Mitchell', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kcrw.apple_news',
u'kcrw.apple_news Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'kcrw.apple_news',
u'kcrw.apple_news Documentation',
author,
'kcrw.apple_news',
    'Apple News API.',
'Miscellaneous'),
]
| 30.228916
| 77
| 0.685731
|
0a5b2ef1c3467c21127cfd982730d7b27332d282
| 8,279
|
py
|
Python
|
gan-script.py
|
godmoves/GAN_MNIST
|
92dddc40f1b85ffaeba3387228ea0cd523b0a524
|
[
"MIT"
] | null | null | null |
gan-script.py
|
godmoves/GAN_MNIST
|
92dddc40f1b85ffaeba3387228ea0cd523b0a524
|
[
"MIT"
] | null | null | null |
gan-script.py
|
godmoves/GAN_MNIST
|
92dddc40f1b85ffaeba3387228ea0cd523b0a524
|
[
"MIT"
] | null | null | null |
"""
This is a straightforward Python implementation of a generative adversarial network.
The code is drawn directly from the O'Reilly interactive tutorial on GANs
(https://www.oreilly.com/learning/generative-adversarial-networks-for-beginners).
A version of this model with explanatory notes is also available on GitHub
at https://github.com/jonbruner/generative-adversarial-networks.
This script requires TensorFlow and its dependencies in order to run. Please see
the readme for guidance on installing TensorFlow.
"""
import tensorflow as tf
import numpy as np
import datetime
# Load MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
# Define the discriminator network
def discriminator(images, reuse_variables=None):
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:
# First convolutional and pool layers
# This finds 32 different 5 x 5 pixel features
d_w1 = tf.get_variable('d_w1', [5, 5, 1, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
d_b1 = tf.get_variable('d_b1', [32], initializer=tf.constant_initializer(0))
d1 = tf.nn.conv2d(input=images, filter=d_w1, strides=[1, 1, 1, 1], padding='SAME')
d1 = d1 + d_b1
d1 = tf.nn.relu(d1)
d1 = tf.nn.avg_pool(d1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Second convolutional and pool layers
# This finds 64 different 5 x 5 pixel features
d_w2 = tf.get_variable('d_w2', [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.02))
d_b2 = tf.get_variable('d_b2', [64], initializer=tf.constant_initializer(0))
d2 = tf.nn.conv2d(input=d1, filter=d_w2, strides=[1, 1, 1, 1], padding='SAME')
d2 = d2 + d_b2
d2 = tf.nn.relu(d2)
d2 = tf.nn.avg_pool(d2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# First fully connected layer
d_w3 = tf.get_variable('d_w3', [7 * 7 * 64, 1024], initializer=tf.truncated_normal_initializer(stddev=0.02))
d_b3 = tf.get_variable('d_b3', [1024], initializer=tf.constant_initializer(0))
d3 = tf.reshape(d2, [-1, 7 * 7 * 64])
d3 = tf.matmul(d3, d_w3)
d3 = d3 + d_b3
d3 = tf.nn.relu(d3)
# Second fully connected layer
d_w4 = tf.get_variable('d_w4', [1024, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
d_b4 = tf.get_variable('d_b4', [1], initializer=tf.constant_initializer(0))
d4 = tf.matmul(d3, d_w4) + d_b4
# d4 contains unscaled values
return d4
# Define the generator network
def generator(z, batch_size, z_dim):
g_w1 = tf.get_variable('g_w1', [z_dim, 3136], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.02))
g_b1 = tf.get_variable('g_b1', [3136], initializer=tf.truncated_normal_initializer(stddev=0.02))
g1 = tf.matmul(z, g_w1) + g_b1
g1 = tf.reshape(g1, [-1, 56, 56, 1])
g1 = tf.contrib.layers.batch_norm(g1, epsilon=1e-5, scope='bn1')
g1 = tf.nn.relu(g1)
# Generate 50 features
    g_w2 = tf.get_variable('g_w2', [3, 3, 1, z_dim//2], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.02))
    g_b2 = tf.get_variable('g_b2', [z_dim//2], initializer=tf.truncated_normal_initializer(stddev=0.02))
g2 = tf.nn.conv2d(g1, g_w2, strides=[1, 2, 2, 1], padding='SAME')
g2 = g2 + g_b2
g2 = tf.contrib.layers.batch_norm(g2, epsilon=1e-5, scope='bn2')
g2 = tf.nn.relu(g2)
g2 = tf.image.resize_images(g2, [56, 56])
# Generate 25 features
    g_w3 = tf.get_variable('g_w3', [3, 3, z_dim//2, z_dim//4], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.02))
    g_b3 = tf.get_variable('g_b3', [z_dim//4], initializer=tf.truncated_normal_initializer(stddev=0.02))
g3 = tf.nn.conv2d(g2, g_w3, strides=[1, 2, 2, 1], padding='SAME')
g3 = g3 + g_b3
g3 = tf.contrib.layers.batch_norm(g3, epsilon=1e-5, scope='bn3')
g3 = tf.nn.relu(g3)
g3 = tf.image.resize_images(g3, [56, 56])
# Final convolution with one output channel
    g_w4 = tf.get_variable('g_w4', [1, 1, z_dim//4, 1], dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.02))
g_b4 = tf.get_variable('g_b4', [1], initializer=tf.truncated_normal_initializer(stddev=0.02))
g4 = tf.nn.conv2d(g3, g_w4, strides=[1, 2, 2, 1], padding='SAME')
g4 = g4 + g_b4
g4 = tf.sigmoid(g4)
# Dimensions of g4: batch_size x 28 x 28 x 1
return g4
z_dimensions = 100
batch_size = 50
z_placeholder = tf.placeholder(tf.float32, [None, z_dimensions], name='z_placeholder')
# z_placeholder is for feeding input noise to the generator
x_placeholder = tf.placeholder(tf.float32, shape = [None,28,28,1], name='x_placeholder')
# x_placeholder is for feeding input images to the discriminator
Gz = generator(z_placeholder, batch_size, z_dimensions)
# Gz holds the generated images
Dx = discriminator(x_placeholder)
# Dx will hold discriminator prediction probabilities
# for the real MNIST images
Dg = discriminator(Gz, reuse_variables=True)
# Dg will hold discriminator prediction probabilities for generated images
# Define losses
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dx, labels = tf.ones_like(Dx)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dg, labels = tf.zeros_like(Dg)))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Dg, labels = tf.ones_like(Dg)))
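# The three objectives above implement the standard GAN losses: the discriminator
# is pushed toward logits that sigmoid to 1 on real MNIST batches (d_loss_real,
# labels of ones) and to 0 on generator output (d_loss_fake, labels of zeros),
# while the generator is trained against the opposite target (g_loss labels its
# images as ones) so that it learns to produce digits the discriminator accepts.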
# Define variable lists
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
# Define the optimizers
# Train the discriminator
d_trainer_fake = tf.train.AdamOptimizer(0.0003).minimize(d_loss_fake, var_list=d_vars)
d_trainer_real = tf.train.AdamOptimizer(0.0003).minimize(d_loss_real, var_list=d_vars)
# Train the generator
g_trainer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=g_vars)
# From this point forward, reuse variables
tf.get_variable_scope().reuse_variables()
sess = tf.Session()
# Send summary statistics to TensorBoard
tf.summary.scalar('Generator_loss', g_loss)
tf.summary.scalar('Discriminator_loss_real', d_loss_real)
tf.summary.scalar('Discriminator_loss_fake', d_loss_fake)
images_for_tensorboard = generator(z_placeholder, batch_size, z_dimensions)
tf.summary.image('Generated_images', images_for_tensorboard, 5)
merged = tf.summary.merge_all()
logdir = "tensorboard/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
writer = tf.summary.FileWriter(logdir, sess.graph)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# Pre-train discriminator
print "pre-training..."
for i in range(300):
z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])
real_image_batch = mnist.train.next_batch(batch_size)[0].reshape([batch_size, 28, 28, 1])
_, __, dLossReal, dLossFake = sess.run([d_trainer_real, d_trainer_fake, d_loss_real, d_loss_fake],
{x_placeholder: real_image_batch, z_placeholder: z_batch})
# Train generator and discriminator together
print "training..."
for i in range(100000):
real_image_batch = mnist.train.next_batch(batch_size)[0].reshape([batch_size, 28, 28, 1])
z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])
# Train discriminator on both real and fake images
_, __, dLossReal, dLossFake = sess.run([d_trainer_real, d_trainer_fake, d_loss_real, d_loss_fake],
{x_placeholder: real_image_batch, z_placeholder: z_batch})
# Train generator
z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])
_ = sess.run(g_trainer, feed_dict={z_placeholder: z_batch})
if i % 10 == 0:
# Update TensorBoard with summary statistics
z_batch = np.random.normal(0, 1, size=[batch_size, z_dimensions])
summary = sess.run(merged, {z_placeholder: z_batch, x_placeholder: real_image_batch})
writer.add_summary(summary, i)
if i % 100 == 0:
saver_path = saver.save(sess, "tensorboard/model.ckpt")
print "iteration:", i, "d_loss_real:", dLossReal, "d_loss_fake:", dLossFake
| 45.489011
| 136
| 0.70552
|
3172772e1b4277d0d877d131e2815b189b33bcfd
| 195
|
py
|
Python
|
src/aio_dtls/dtls/handshake_ecdhe_psk.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
src/aio_dtls/dtls/handshake_ecdhe_psk.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
src/aio_dtls/dtls/handshake_ecdhe_psk.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
from .helper import Helper
from ..constructs import dtls
from ..tls.handshake_ecdhe_psk import EcdhePsk as TlsEcdhePsk
class EcdhePsk(TlsEcdhePsk):
tls_construct = dtls
helper = Helper
| 21.666667
| 61
| 0.779487
|
50a62ee105a643ebb7ca172adc067f69aea63a81
| 19,525
|
py
|
Python
|
google/ads/google_ads/v2/services/campaign_bid_modifier_service_client.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | 1
|
2019-11-30T23:42:39.000Z
|
2019-11-30T23:42:39.000Z
|
google/ads/google_ads/v2/services/campaign_bid_modifier_service_client.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v2/services/campaign_bid_modifier_service_client.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | 1
|
2020-03-13T00:14:31.000Z
|
2020-03-13T00:14:31.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v2.services CampaignBidModifierService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.ads.google_ads.v2.services import campaign_bid_modifier_service_client_config
from google.ads.google_ads.v2.services import enums
from google.ads.google_ads.v2.services.transports import campaign_bid_modifier_service_grpc_transport
from google.ads.google_ads.v2.proto.resources import account_budget_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_proposal_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_feed_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_parameter_pb2
from google.ads.google_ads.v2.proto.resources import ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_schedule_view_pb2
from google.ads.google_ads.v2.proto.resources import age_range_view_pb2
from google.ads.google_ads.v2.proto.resources import asset_pb2
from google.ads.google_ads.v2.proto.resources import bidding_strategy_pb2
from google.ads.google_ads.v2.proto.resources import billing_setup_pb2
from google.ads.google_ads.v2.proto.resources import campaign_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import campaign_bid_modifier_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import asset_service_pb2
from google.ads.google_ads.v2.proto.services import asset_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class CampaignBidModifierServiceClient(object):
"""Service to manage campaign bid modifiers."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v2.services.CampaignBidModifierService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignBidModifierServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def campaign_bid_modifier_path(cls, customer, campaign_bid_modifier):
"""Return a fully-qualified campaign_bid_modifier string."""
return google.api_core.path_template.expand(
'customers/{customer}/campaignBidModifiers/{campaign_bid_modifier}',
customer=customer,
campaign_bid_modifier=campaign_bid_modifier,
)
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.CampaignBidModifierServiceGrpcTransport,
Callable[[~.Credentials, type], ~.CampaignBidModifierServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = campaign_bid_modifier_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=campaign_bid_modifier_service_grpc_transport.CampaignBidModifierServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = campaign_bid_modifier_service_grpc_transport.CampaignBidModifierServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_campaign_bid_modifier(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested campaign bid modifier in full detail.
Args:
resource_name (str): The resource name of the campaign bid modifier to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.CampaignBidModifier` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_campaign_bid_modifier' not in self._inner_api_calls:
self._inner_api_calls['get_campaign_bid_modifier'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_campaign_bid_modifier,
default_retry=self._method_configs['GetCampaignBidModifier'].retry,
default_timeout=self._method_configs['GetCampaignBidModifier'].timeout,
client_info=self._client_info,
)
request = campaign_bid_modifier_service_pb2.GetCampaignBidModifierRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_campaign_bid_modifier'](request, retry=retry, timeout=timeout, metadata=metadata)
def mutate_campaign_bid_modifiers(
self,
customer_id,
operations,
partial_failure=None,
validate_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates, updates, or removes campaign bid modifiers.
Operation statuses are returned.
Args:
customer_id (str): ID of the customer whose campaign bid modifiers are being modified.
operations (list[Union[dict, ~google.ads.googleads_v2.types.CampaignBidModifierOperation]]): The list of operations to perform on individual campaign bid modifiers.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v2.types.CampaignBidModifierOperation`
partial_failure (bool): If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will be carried
out in one transaction if and only if they are all valid.
Default is false.
validate_only (bool): If true, the request is validated but not executed. Only errors are
returned, not results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.MutateCampaignBidModifiersResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'mutate_campaign_bid_modifiers' not in self._inner_api_calls:
self._inner_api_calls['mutate_campaign_bid_modifiers'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_campaign_bid_modifiers,
default_retry=self._method_configs['MutateCampaignBidModifiers'].retry,
default_timeout=self._method_configs['MutateCampaignBidModifiers'].timeout,
client_info=self._client_info,
)
request = campaign_bid_modifier_service_pb2.MutateCampaignBidModifiersRequest(
customer_id=customer_id,
operations=operations,
partial_failure=partial_failure,
validate_only=validate_only,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('customer_id', customer_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['mutate_campaign_bid_modifiers'](request, retry=retry, timeout=timeout, metadata=metadata)
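# A minimal usage sketch of the client defined above; the key file path, customer
# ID and bid modifier ID are placeholders, and additional request metadata (for
# example authentication headers managed by a wrapping library) may be required
# in a real deployment.
def _example_get_campaign_bid_modifier():
    client = CampaignBidModifierServiceClient.from_service_account_file(
        'path/to/service_account.json')
    resource_name = CampaignBidModifierServiceClient.campaign_bid_modifier_path(
        'INSERT_CUSTOMER_ID', 'INSERT_CAMPAIGN_BID_MODIFIER_ID')
    # Issues a GetCampaignBidModifier RPC and returns the full resource.
    return client.get_campaign_bid_modifier(resource_name)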
| 52.486559
| 176
| 0.725787
|
e805319d651ea65e204b8fd744367a789289d818
| 57,572
|
py
|
Python
|
ion_functions/data/adcp_functions.py
|
ooici/ion-functions
|
ad5f7a1a00e3d359c72156d64f59686705c09d53
|
[
"Apache-2.0"
] | 10
|
2015-04-03T15:32:21.000Z
|
2018-11-21T11:57:26.000Z
|
ion_functions/data/adcp_functions.py
|
ooici/ion-functions
|
ad5f7a1a00e3d359c72156d64f59686705c09d53
|
[
"Apache-2.0"
] | 8
|
2015-01-07T15:19:22.000Z
|
2015-12-08T18:14:04.000Z
|
ion_functions/data/adcp_functions.py
|
ooici/ion-functions
|
ad5f7a1a00e3d359c72156d64f59686705c09d53
|
[
"Apache-2.0"
] | 17
|
2015-01-14T16:23:00.000Z
|
2021-07-19T08:26:52.000Z
|
#!/usr/bin/env python
"""
@package ion_functions.data.adcp_functions
@file ion_functions/data/adcp_functions.py
@author Christopher Wingard, Russell Desiderio, Craig Risien
@brief Module containing ADCP related data-calculations.
"""
import numpy as np
from ion_functions.data.generic_functions import magnetic_declination
from ion_functions.data.generic_functions import replace_fill_with_nan
# instrument fill value unprocessed by CI
# (bad beam velocity sentinel output by tRDI ADCP instruments)
ADCP_FILLVALUE = -32768
"""
**** For instruments programmed in beam coordinates:
(ADCPS-I,K; ADCPT-B,D,E)
adcp_beam_eastward -- calculates VELPROF-VLE_L1
adcp_beam_northward -- calculates VELPROF-VLN_L1
adcp_beam_vertical -- calculates VELPROF-VLU_L1
adcp_beam_error -- calculates VELPROF-ERR_L1
**** For instruments programmed in earth coordinates:
(ADCPA; ADCPS-J,L,N; ADCPT-C,F,G,M)
adcp_earth_eastward -- calculates VELPROF-VLE_L1
adcp_earth_northward -- calculates VELPROF-VLN_L1
adcp_earth_vertical -- calculates VELPROF-VLU_L1
adcp_earth_error -- calculates VELPROF-ERR_L1
**** For the VADCP programmed in beam coordinates:
vadcp_beam_eastward -- calculates VELTURB-VLE_L1
vadcp_beam_northward -- calculates VELTURB-VLN_L1
vadcp_beam_vertical_true -- calculates VELTURB-VLU-5BM_L1
vadcp_beam_vertical_est -- calculates VELTURB-VLU-4BM_L1
vadcp_beam_error -- calculates VELTURB-ERR_L1
**** For all tRDI ADCP instruments:
adcp_backscatter -- calculates ECHOINT-B1_L1,
calculates ECHOINT-B2_L1,
calculates ECHOINT-B3_L1,
calculates ECHOINT-B4_L1.
**** Base functions used by above functions
adcp_beam2ins -- applies the beam to instrument transform using a 4
beam solution for instruments programmed in beam coordinates
adcp_ins2earth -- applies the instrument to Earth transform for all
instruments originally programmed in beam coordinates.
magnetic_correction -- corrects horizontal velocities for the magnetic
variation (declination) at the measurement location.
**** Supplementary functions to calculate velocity bin depths:
adcp_bin_depths -- calculates bin depths for the pd0 output format
(virtually all tRDI ADCPs deployed by OOI); uses
TEOS-10 functions p_from_z and enthalpy_SSO_0_p.
adcp_bin_depths_pd8 -- calculates bin depths for the pd8 output format,
assuming that (1) the ADCP operator recorded the
necessary input variables and (2) these are somehow
entered into the CI system.
"""
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in beam coordinates by RSN (ADCPS-I,K and ADCPT-B,D,E)
def adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
uu_corr = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
    u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-03-28: Russell Desiderio. Corrected documentation only.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_corr = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
Usage:
ww_cor = adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf)
where
ww_cor = vertical velocity profiles (VELPROF-VLU_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return ww
def adcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2015-06-10: Russell Desiderio.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
e = adcp_beam_error(b1, b2, b3, b4)
where
e = Error velocity profiles (VELPROF-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
e = e / 1000. # mm/s
# return the Error Velocity Profile
return e
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in Earth coordinates by CGSN (Pioneer and Endurance) (ADCPA,
# ADCPS-J,L,N and ADCPT-C,F,G,M)
def adcp_earth_eastward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
Removed the depth dependence from the magnetic declination.
2015-06-25: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
uu_cor = adcp_earth_eastward(u, v, z, lat, lon, dt)
where
uu_cor = eastward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLE_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, u, v)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def adcp_earth_northward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
Removed the depth dependence from the magnetic declination.
2015-06-25: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
vv_cor = adcp_earth_northward(u, v, z, lat, lon, dt)
where
vv_cor = northward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLN_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, u, v)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_earth_vertical(w):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: Christopher Wingard. Initial code.
2015-06-25: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
w_scl = adcp_earth_vertical(w)
where
w_scl = scaled upward velocity profiles in Earth coordinates
(VELPROF-VLU_L1) [m s-1]
w = upward velocity profiles (VELPROF-VLU_L0) [mm s-1]
"""
w = replace_fill_with_nan(ADCP_FILLVALUE, w)
# scale velocity to m/s
w_scl = w / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return w_scl
def adcp_earth_error(e):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: Christopher Wingard. Initial code.
2015-06-25: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
e_scl = adcp_earth_error(e)
where
e_scl = scaled error velocity profiles in Earth coordinates
(VELPROF-ERR_L1) [m s-1]
e = error velocity profiles (VELPROF-ERR_L0) [mm s-1]
"""
e = replace_fill_with_nan(ADCP_FILLVALUE, e)
# scale velocity to m/s
e_scl = e / 1000. # mm/s -> m/s
# return the scaled Error Velocity Profile
return e_scl
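# A minimal, illustrative sketch of the Earth-coordinate wrappers defined above;
# the velocity values are synthetic placeholders, not OOI data.  Bins containing
# the instrument fill value propagate through as NaN, and the mm/s L0 inputs come
# back as m/s L1 products.
def _example_earth_chain():
    u = np.array([[251, -118, ADCP_FILLVALUE]])             # eastward L0 [mm s-1]
    v = np.array([[-32, 407, 15]])                          # northward L0 [mm s-1]
    lat, lon = np.array([44.64]), np.array([-124.30])
    depth = np.array([45000])                               # [daPa]
    ts = np.array([3565987200.])                            # [sec since 1900-01-01]
    east = adcp_earth_eastward(u, v, depth, lat, lon, ts)
    north = adcp_earth_northward(u, v, depth, lat, lon, ts)
    return east, north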
# Compute the VELTURB_L1 data products for the VADCP instrument deployed by RSN.
def vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELTURB-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Turbulent Velocity Profile and Echo Intensity -
DCN 1341-00760.
Implemented by:
2014-06-25: Christopher Wingard. Initial code, based on existing ADCP
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
uu_cor = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile
(VELTURB-VLN) from beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: Christopher Wingard. Initial code, based on existing ADCP
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_cor = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
        z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
    # ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the "estimated" Upward Velocity Profile
(VELTURB-VLU-4BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This provides the
traditional estimate of the vertical velocity component from a 4 beam
solution, where each beam is facing outward at an angle (20 degrees)
relative to the vertical.
Implemented by:
2014-06-25: Christopher Wingard. Initial code, based on existing ADCP
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: Russell Desiderio. Renamed this data product.
Usage:
ww_est = vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf)
where
ww_est = estimated vertical velocity profiles in Earth coordinates
(VELTURB-VLU-4BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the estimated Upward Velocity Profile
return ww
def vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf):
"""
Description:
Wrapper function to compute the "true" Upward Velocity Profile
(VELTURB-VLU-5BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This is assumed to provide
a better estimate of the true vertical velocity component, since beam 5
is pointing directly up.
Implemented by:
2014-06-25: Christopher Wingard. Initial code, based on existing ADCP
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: Russell Desiderio. Renamed this data product.
2015-06-25: Russell Desiderio. Incorporated b5 int fillvalue -> Nan.
Usage:
ww_true = vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf)
where
ww_true = true vertical velocity profiles in Earth coordinates
(VELTURB-VLU-5BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
b5 = "beam 5" velocity profiles in beam coordinates (VELTURB-B5_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
# fill values in the 4 beams are checked for inside adcp_beam2ins
u, v, _, _ = adcp_beam2ins(b1, b2, b3, b4)
# check b5 for the presence of fill values
b5 = replace_fill_with_nan(ADCP_FILLVALUE, b5)
# compute the instrument to earth beam transform
# fill values in the adcp orientation parameters are checked for inside adcp_ins2earth
_, _, ww = adcp_ins2earth(u, v, b5, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the true Upward Velocity Profile
return ww
def vadcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELTURB-ERR)
from the beam coordinate transformed velocity profiles as defined in
the Data Product Specification for Turbulent Velocity Profile and Echo
Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: Christopher Wingard. Initial code, based on existing ADCP
2015-06-10: Russell Desiderio.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
        e = vadcp_beam_error(b1, b2, b3, b4)
where
e = error velocity profiles (VELTURB-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
    e = e / 1000.  # mm/s -> m/s
# return the Error Velocity Profile
return e
# Calculates ECHOINT_L1 for all tRDI ADCPs
def adcp_backscatter(raw, sfactor):
"""
Description:
Converts the echo intensity data from counts to dB using a factory
specified scale factor (nominally 0.45 dB/count for the Workhorse
family of ADCPs and 0.61 dB/count for the ExplorerDVL family). As
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2014-04-21: Christopher Wingard. Initial code.
2015-06-25: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
dB = adcp_backscatter(raw, sfactor)
where
dB = Relative Echo Intensity (ECHOINT_L1) [dB]
raw = raw echo intensity (ECHOINT_L0) [count]
sfactor = factory supplied scale factor, instrument and beam specific [dB/count]
Notes:
The ADCP outputs the raw echo intensity as a 1-byte integer, so the ADCP_FILLVALUE
cannot apply (requires 2 bytes).
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
            1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
if np.isscalar(sfactor) is False:
sfactor = sfactor.reshape(sfactor.shape[0], 1)
# check raw for the presence of system fill values
raw = replace_fill_with_nan(None, raw)
dB = raw * sfactor
return dB
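# Illustrative example (a minimal sketch, assuming numpy is imported as np at
# the top of this module): with the nominal Workhorse scale factor of
# 0.45 dB/count quoted above, the conversion is a simple multiplication,
#
#     adcp_backscatter(np.array([[100, 150, 200]]), 0.45)
#     # -> [[45.0, 67.5, 90.0]] dB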
##### ADCP Beam to Earth Transforms and Magnetic Variation Corrections
def adcp_beam2ins(b1, b2, b3, b4):
"""
Description:
This function converts the Beam Coordinate transformed velocity
profiles to the instrument coordinate system. The calculations are
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2015-06-24: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
u, v, w, e = adcp_beam2ins(b1, b2, b3, b4)
where
u = "east" velocity profiles in instrument coordinates [mm s-1]
v = "north" velocity profiles in instrument coordinates [mm s-1]
w = "vertical" velocity profiles in instrument coordinates [mm s-1]
e = "error" velocity profiles [mm s-1]
b1 = "beam 1" velocity profiles in beam coordinates [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates [mm s-1]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
            1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
b1 = np.atleast_2d(b1)
b2 = np.atleast_2d(b2)
b3 = np.atleast_2d(b3)
b4 = np.atleast_2d(b4)
b1, b2, b3, b4 = replace_fill_with_nan(ADCP_FILLVALUE, b1, b2, b3, b4)
theta = 20.0 / 180.0 * np.pi
a = 1.0 / (2.0 * np.sin(theta))
b = 1.0 / (4.0 * np.cos(theta))
c = 1.0 # +1.0 for convex transducer head, -1 for concave
d = a / np.sqrt(2.0)
u = c * a * (b1 - b2)
v = c * a * (b4 - b3)
w = b * (b1 + b2 + b3 + b4)
e = d * (b1 + b2 - b3 - b4)
return (u, v, w, e)
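# Minimal numerical sketch for adcp_beam2ins: if all four beams report the same
# along-beam velocity, the horizontal and error components cancel and only the
# vertical component survives, scaled by 1/cos(20 deg),
#
#     u, v, w, e = adcp_beam2ins([100], [100], [100], [100])
#     # u, v and e -> [[0.]] ; w -> roughly [[106.4]]  (100 / cos(20 deg), mm s-1)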
def adcp_ins2earth(u, v, w, heading, pitch, roll, vertical):
"""
Description:
This function converts the Instrument Coordinate transformed velocity
profiles to the Earth coordinate system. The calculation is defined in
the Data Product Specification for Velocity Profile and Echo Intensity
- DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-04-04: Russell Desiderio. Optimized code performance by replacing the for
loops previously used to calculate vectorized matrix multiplication
products with calls to np.einsum (numpy Einstein summation function).
2015-06-24: Russell Desiderio. Changed implementation of 'vertical' in the roll
calculation so that if these values are equal to the CI fill value
(-999999999), when these fill values are replaced with nans, the nans
will propagate through to the data product output.
2015-06-24: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
        uu, vv, ww = adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)
where
uu = "east" velocity profiles in earth coordinates [mm s-1]
vv = "north" velocity profiles in earth coordinates [mm s-1]
ww = "vertical" velocity profiles in earth coordinates [mm s-1]
u = east velocity profiles in instrument coordinates [mm s-1]
v = north velocity profiles in instrument coordinates [mm s-1]
w = vertical velocity profiles in instrument coordinates [mm s-1]
heading = instrument's uncorrected magnetic heading [centidegrees]
pitch = instrument pitch [centidegrees]
roll = instrument roll [centidegrees]
vertical = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
            1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
### the input beam data for adcp_ins2earth are always called using the output
### of adcp_beam2ins, so the following lines are not needed.
# insure we are dealing with array inputs
#u = np.atleast_2d(u)
#v = np.atleast_2d(v)
#w = np.atleast_2d(w)
# check for CI fill values before changing units.
# this function 'conditions' (np.atleast_1d) its inputs.
# TRDI does not apply its ADCP fill/bad value sentinels to compass data.
heading, pitch, roll, vertical = replace_fill_with_nan(None, heading, pitch, roll, vertical)
# change units from centidegrees to degrees
heading = heading / 100.0
pitch = pitch / 100.0
roll = roll / 100.0
# better way to calculate roll from the vertical orientation toggle;
# this will propagate R as nans if the vertical variable is missing from the data.
R = roll + vertical * 180.0
# roll
Rrad = np.radians(R)
cos_R = np.cos(Rrad)
sin_R = np.sin(Rrad)
# heading
Hrad = np.radians(heading)
cos_H = np.cos(Hrad)
sin_H = np.sin(Hrad)
# pitch
t1rad = np.radians(pitch)
t2rad = np.radians(roll)
Prad = np.arctan(np.tan(t1rad) * np.cos(t2rad))
cos_P = np.cos(Prad)
sin_P = np.sin(Prad)
# determine array size
n_packets = u.shape[0]
n_uvw = u.shape[1]
# initialize vectors to be used as matrix elements
ones = np.ones(n_packets)
zeros = ones * 0.0
# the rollaxis calls reorient the matrices so that their lead index is
# the data packet index
M1 = np.array([[cos_H, sin_H, zeros],
[-sin_H, cos_H, zeros],
[zeros, zeros, ones]])
M1 = np.rollaxis(M1, 2)
M2 = np.array([[ones, zeros, zeros],
[zeros, cos_P, -sin_P],
[zeros, sin_P, cos_P]])
M2 = np.rollaxis(M2, 2)
M3 = np.array([[cos_R, zeros, sin_R],
[zeros, ones, zeros],
[-sin_R, zeros, cos_R]])
M3 = np.rollaxis(M3, 2)
# construct input array of coordinates (velocities) to be transformed.
# the basis set is 3D (E,N,U) so that the middle dimension is sized at 3.
uvw = np.zeros((n_packets, 3, n_uvw))
# pack the coordinates (velocities) to be transformed into the appropriate
# slices.
uvw[:, 0, :] = u
uvw[:, 1, :] = v
uvw[:, 2, :] = w
# the Einstein summation is here configured to do the matrix
# multiplication MM(i,l) = M1(i,j) * M2(j,k) * M3(k,l) on each slice h.
MM = np.einsum('hij,hjk,hkl->hil', M1, M2, M3)
# the Einstein summation is here configured to do the matrix
# multiplication uvw_earth(i,m) = MM(i,l) * uvw(l,m) on each slice h.
uvw_earth = np.einsum('hil,hlm->him', MM, uvw)
# NOTE:
# these last two executable statements run about a factor of 2
# faster in the 10000 data packet performance tests versus combining
# these operations into the one statement:
# uvw_earth = np.einsum('hij,hjk,hkl,hlm->him', M1, M2, M3, uvw)
# break out the coordinate slices and return them
uu = uvw_earth[:, 0, :]
vv = uvw_earth[:, 1, :]
ww = uvw_earth[:, 2, :]
return (uu, vv, ww)
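# Quick sanity sketch for adcp_ins2earth (velocities are 2D arrays, compass
# inputs are 1D arrays in centidegrees): with heading = pitch = roll = 0 the
# rotations reduce to the identity for a downward-looking unit (vertical = 0),
# so (uu, vv, ww) = (u, v, w); for an upward-looking unit (vertical = 1) the
# extra 180-degree roll flips the signs of u and w:
#
#     uu, vv, ww = adcp_ins2earth(np.array([[100.]]), np.array([[100.]]),
#                                 np.array([[100.]]), np.array([0]),
#                                 np.array([0]), np.array([0]), np.array([1]))
#     # uu ~ -100, vv ~ 100, ww ~ -100 (to within floating-point round-off)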
def magnetic_correction(theta, u, v):
"""
Description:
This function corrects velocity profiles for the magnetic variation
(declination) at the measurement location. The magnetic declination
is obtained from the 2010 World Magnetic Model (WMM2010) provided by
NOAA (see wmm_declination).
This version handles 'vectorized' input variables without using for
loops. It was specifically written to handle the case of a 1D array of
theta values, theta=f(i), with corresponding sets of 'u' and 'v' values
such that u=f(i,j) and v=f(i,j), where there are j 'u' and 'v' values
for each theta(i).
Implemented by:
2014-04-04: Russell Desiderio. Initial code. This function is used to
calculate magnetic corrections by the functions contained
in this module instead of the function magnetic_correction
found in ion_functions.data.generic_functions.
2015-04-10: Russell Desiderio. Corrected a typo:
uv = np.atleast_2d(u) -> u = np.atleast_2d(u)
Usage:
u_cor, v_cor = magnetic_correction(theta, u, v)
where
u_cor = eastward velocity profiles, in earth coordinates, with
the correction for magnetic variation applied.
v_cor = northward velocity profiles, in earth coordinates,
with the correction for magnetic variation applied.
theta = magnetic variation based on location (latitude, longitude and
altitude) and date; units of theta are [degrees]
u = uncorrected eastward velocity profiles in earth coordinates
v = uncorrected northward velocity profiles in earth coordinates
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
OOI (2013). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# force shapes of inputs to arrays
theta = np.atleast_1d(theta)
u = np.atleast_2d(u)
v = np.atleast_2d(v)
theta_rad = np.radians(theta)
cosT = np.cos(theta_rad)
sinT = np.sin(theta_rad)
M = np.array([[cosT, sinT],
[-sinT, cosT]])
# roll axes so that the lead index represents data packet #.
M = np.rollaxis(M, 2)
# the coordinate system is 2D, so the middle dimension is sized at 2.
uv = np.zeros((u.shape[0], 2, u.shape[1]))
# pack the coordinates to be rotated into the appropriate slices
uv[:, 0, :] = u
uv[:, 1, :] = v
# the Einstein summation is here configured to do the matrix
# multiplication uv_cor(i,k) = M(i,j) * uv(j,k) on each slice h.
uv_cor = np.einsum('hij,hjk->hik', M, uv)
# the magnetically corrected u values are:
u_cor = uv_cor[:, 0, :]
# the magnetically corrected v values are:
v_cor = uv_cor[:, 1, :]
# return corrected u and v values
return (u_cor, v_cor)
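# The rotation applied above amounts to, for each data packet:
#
#     u_cor =  u * cos(theta) + v * sin(theta)
#     v_cor = -u * sin(theta) + v * cos(theta)
#
# e.g. (illustrative values) theta = 30 degrees with u = [[1000.]] mm/s and
# v = [[0.]] mm/s gives u_cor of roughly [[866.]] and v_cor of roughly [[-500.]].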
def adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: Craig Risien. Initial code.
2015-06-26: Russell Desiderio. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
        bin_depths = adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [bar]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
            1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from bar to decibar
pressure_dbar = pressure * 10.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
def adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: Craig Risien. Initial code.
2015-06-26: Russell Desiderio. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
        bin_depths = adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [daPa]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
            1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from decaPascal to decibar
pressure_dbar = pressure / 1000.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
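# Worked example of the unit handling above (values are illustrative): a
# pressure reading of 125000 daPa is 125 dbar, which for a deployment at
# 4 degrees latitude corresponds to a sensor depth of roughly 124.3 m (see the
# z_from_p check values below); adcp_bin_depths_meters then lays the bins out
# from that depth.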
def z_from_p(p, lat, geo_strf_dyn_height=0, sea_surface_geopotential=0):
"""Calculates height from sea pressure using the computationally-efficient
75-term expression for density in terms of SA, CT and p (Roquet et al.,
2015). Dynamic height anomaly, geo_strf_dyn_height, if provided, must be
    computed with its p_ref=0 (the surface). Also if provided, sea_surface_geopotential
is the geopotential at zero sea pressure.
Calls a function which calculates enthalpy assuming standard ocean salinity
and 0 degrees celsius.
Parameters
----------
p : pressure [dbar]
lat : latitude in decimal degrees north [-90..+90]
geo_strf_dyn_height : dynamic height anomaly [m^2/s^2]
sea_surface_geopotential : geopotential at zero sea pressure [ m^2/s^2 ]
Returns
-------
z : TEOS-10 height [m] : height is returned as a negative number; its
absolute value is the depth below the sea surface.
#################################################################
# Check values from TEOS-10 version 3.05 (matlab code): #
# from http://www.teos-10.org/pubs/gsw/html/gsw_z_from_p.html #
#################################################################
p = [10, 50, 125, 250, 600, 1000]
lat = 4
z_from_p(p, lat) =
[ -9.9445834469453, -49.7180897012550, -124.2726219409978,
-248.4700576548589, -595.8253480356214, -992.0919060719987]
Notes
-----
At sea level z = 0, and since z (HEIGHT) is defined to be positive upwards,
it follows that while z is positive in the atmosphere, it is NEGATIVE in
the ocean.
References
----------
IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of
seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. Available from the TEOS-10 web site.
McDougall, T.J., D.R. Jackett, D.G. Wright and R. Feistel, 2003:
Accurate and computationally efficient algorithms for potential
temperature and density of seawater. J. Atmosph. Ocean. Tech., 20,
pp. 730-741.
    Moritz, 2000: Geodetic reference system 1980. J. Geodesy, 74, 128-133.
Roquet, F., G. Madec, T.J. McDougall, P.M. Barker, 2015: Accurate
    polynomial expressions for the density and specific volume of seawater
using the TEOS-10 standard. Ocean Modelling.
Saunders, P. M., 1981: Practical conversion of pressure to depth.
Journal of Physical Oceanography, 11, 573-574.
IMPLEMENTATION NOTES:
Russell Desiderio. 2015_07_01
versions 3.04 and 3.05 of the main function z_from_p are identical.
z_from_p calls the subroutine enthalpy_SSO_0_p; this subroutine
has been updated from ver 3.04 to 3.05.
the check values above for z_from_p have been updated to incorporate
this change using enthalpy_SSO_0_p ver 3.05.
"""
X = np.sin(np.deg2rad(lat))
sin2 = X ** 2
B = 9.780327 * (1.0 + (5.2792e-3 + (2.32e-5 * sin2)) * sin2)
gamma = 2.26e-07
A = -0.5 * gamma * B
C = enthalpy_SSO_0_p(p) - geo_strf_dyn_height
return -2 * C / (B + np.sqrt(B ** 2 - 4 * A * C))
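# Spot check against the TEOS-10 check values quoted in the docstring above
# (assuming numpy is imported as np at the top of this module):
#
#     z_from_p(np.array([10., 125., 1000.]), 4)
#     # -> approximately [-9.94, -124.27, -992.09] m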
def enthalpy_SSO_0_p(p):
"""
    This documentation and code is copied and pasted from the matlab coding of this function.
%==========================================================================
% This function calculates enthalpy at the Standard Ocean Salinity, SSO,
% and at a Conservative Temperature of zero degrees C, as a function of
% pressure, p, in dbar, using a streamlined version of the 76-term
% computationally-efficient expression for specific volume, that is, a
% streamlined version of the code "gsw_enthalpy(SA,CT,p)".
%
% VERSION NUMBER: 3.05 (27th January 2015)
%
% REFERENCES:
% Roquet, F., G. Madec, T.J. McDougall, P.M. Barker, 2015: Accurate
% polynomial expressions for the density and specifc volume of seawater
% using the TEOS-10 standard. Ocean Modelling.
%
%==========================================================================
IMPLEMENTATION NOTES:
Russell Desiderio. 2015_07_01. this subroutine has been updated
from ver 3.04 to 3.05.
"""
z = p * 1e-4
h006 = -2.1078768810e-9
h007 = 2.8019291329e-10
dynamic_enthalpy_SSO_0_p = z * (9.726613854843870e-4 + z * (-2.252956605630465e-5 + z * (
2.376909655387404e-6 + z * (-1.664294869986011e-7 + z * (
-5.988108894465758e-9 + z * (h006 + h007 * z))))))
enthalpy_SSO_0 = dynamic_enthalpy_SSO_0_p * 1.e8 # Note. 1e8 = db2Pa*1e4
return enthalpy_SSO_0
def adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation):
"""
Description:
Calculates the center bin depths for PD0, PD8 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-30: Craig Risien. Initial code.
2015-06-26: Russell Desiderio. Time-vectorized the code by finessing the conditionals.
2015-06-30: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
        bin_depths_pd8 = adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth,
adcp_orientation)
where
bin_depths_pd8 = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
sensor_depth = estimated depth at the sensor head [meters]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
Notes:
The PD8 output format is a very sparse format. Other than num_bins, it does *not* record
any of the other input variables required by this DPA. Those must somehow be supplied "by
hand".
"""
# check for CI fill values.
#
# Note that these input parameters will not come from an IDD driver (except for possibly
# (num_bins) because the PD8 output format does not output them. Therefore, I don't know
# if they will be of type integer or not. However, ndarrays composed of float types are
# passed through the check-code unchanged, so run the inputs through in case they are of
# type int and in case -999999999 fill values are somehow present.
dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation = replace_fill_with_nan(
None, dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# note, there is a CI problem not yet addressed if the time-vectorized values
# in num_bins are not all the same!! For now, assume they are all the same:
num_bins_constant = num_bins[0]
# make bin_numbers a row vector
bin_numbers = np.array([np.arange(num_bins_constant)])
# Convert from cm to meters
# the input variables are type integer, so divide by a real number
# to avoid truncation errors.
dist_first_bin = dist_first_bin / 100.0
bin_size = bin_size / 100.0
# make sure sensor depth is positive
sensor_depth = np.fabs(sensor_depth)
# Following the PD0 convention where
# adcp_orientation = 0 is downward looking, bindepths are added to sensor depth
# = 1 is upward looking, bindepths are subtracted from sensor depth
z_sign = 1.0 - 2.0 * adcp_orientation
# to broadcast the vertical time dimension correctly with the horizontal bin_numbers dimension,
# make all the 1D time arrays into column vectors to be processed with the bin_numbers row vector.
sensor_depth = sensor_depth.reshape(-1, 1)
z_sign = z_sign.reshape(-1, 1)
dist_first_bin = dist_first_bin.reshape(-1, 1)
bin_size = bin_size.reshape(-1, 1)
# Calculate bin depths
bin_depths_pd8 = sensor_depth + z_sign * (dist_first_bin + bin_size * bin_numbers)
return bin_depths_pd8
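# Illustrative example of the bin-depth arithmetic above: an upward-looking
# unit (adcp_orientation = 1) at a 100 m sensor depth, with the first bin
# 500 cm away, 400 cm bins and 3 bins, yields bins stepping toward the surface:
#
#     adcp_bin_depths_meters(np.array([500]), np.array([400]), np.array([3]),
#                            np.array([100.0]), np.array([1]))
#     # -> [[95., 91., 87.]]  (meters)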
| 40.036161
| 103
| 0.63418
|
66f7c2fde8d72f64a1371ced7e47b3ca47545073
| 8,290
|
py
|
Python
|
tests/edalize_common.py
|
idex-biometrics/edalize
|
36a12f8e47ffb0cda4e6c8fe76fafb2073fa7704
|
[
"BSD-2-Clause"
] | 1
|
2022-03-17T23:30:32.000Z
|
2022-03-17T23:30:32.000Z
|
tests/edalize_common.py
|
idex-biometrics/edalize
|
36a12f8e47ffb0cda4e6c8fe76fafb2073fa7704
|
[
"BSD-2-Clause"
] | null | null | null |
tests/edalize_common.py
|
idex-biometrics/edalize
|
36a12f8e47ffb0cda4e6c8fe76fafb2073fa7704
|
[
"BSD-2-Clause"
] | null | null | null |
from collections import OrderedDict
import os.path
import shutil
import pytest
from edalize import get_edatool
tests_dir = os.path.dirname(__file__)
class TestFixture:
"""A fixture that makes an edalize backend with work_root directory
Create this object using the make_edalize_test factory fixture. This passes
through its `tool_name` and sets up a temporary directory for `work_root`,
then passes its keyword arguments through to the TestFixture initializer.
Args:
tool_name: The name of the tool
work_root: The directory to treat as a work root
test_name: The name to call the backend. Defaults to
`'test_<tool_name>_0'`
param_types: A list of parameter types. Defaults to `['plusarg',
'vlogdefine', 'vlogparam']` (the parameter types supported
by most simulators).
files: A list of files to use. Defaults to `None`, which means to use
:py:data:`FILES`.
tool_options: Dictionary passed to _setup_backend. Defaults to `{}`.
ref_dir: A reference directory relative to `test_<tool_name>`. Defaults
to `'.'`
use_vpi: If true, set up backend with definitions from :attr:`VPI`.
Defaults to `False`.
"""
def __init__(
self,
tool_name,
work_root,
test_name=None,
param_types=["plusarg", "vlogdefine", "vlogparam"],
files=None,
tool_options={},
ref_dir=".",
use_vpi=False,
toplevel="top_module",
):
raw_ref_dir = os.path.join(tests_dir, "test_" + tool_name, ref_dir)
self.test_name = (
"test_{}_0".format(tool_name) if test_name is None else test_name
)
self.ref_dir = os.path.normpath(raw_ref_dir)
self.work_root = work_root
self.backend = _setup_backend(
self.test_name,
tool_name,
param_types,
files,
tool_options,
work_root,
use_vpi,
toplevel,
)
def compare_files(self, files, ref_subdir="."):
"""Check some files in the work root match those in the ref directory
The files argument gives the list of files to check. These are
interpreted as paths relative to the work directory and relative to
self.ref_dir / ref_subdir.
This is a wrapper around edalize_common.compare_files: see its
documentation for how to use the :envvar:`GOLDEN_RUN` environment
variable to copy across a golden reference.
"""
ref_dir = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
return compare_files(ref_dir, self.work_root, files)
def copy_to_work_root(self, path):
shutil.copy(
os.path.join(self.ref_dir, path), os.path.join(self.work_root, path)
)
@pytest.fixture
def make_edalize_test(monkeypatch, tmpdir):
"""A factory fixture to make an edalize backend with work_root directory
The returned factory method takes a `tool_name` (the name of the tool) and
the keyword arguments supported by :class:`TestFixture`. It returns a
:class:`TestFixture` object, whose `work_root` is a temporary directory.
"""
# Prepend directory `mock_commands` to PATH environment variable
monkeypatch.setenv("PATH", os.path.join(tests_dir, "mock_commands"), ":")
created = []
def _fun(tool_name, **kwargs):
work_root = tmpdir / str(len(created))
work_root.mkdir()
fixture = TestFixture(tool_name, str(work_root), **kwargs)
created.append(fixture)
return fixture
return _fun
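# A typical test might drive the factory like this (a minimal sketch; the tool
# name, tool_options and expected output files below are placeholders, not
# part of this module):
#
#     def test_some_backend(make_edalize_test):
#         tf = make_edalize_test('sometool', tool_options={'some_option': True})
#         tf.backend.configure()
#         tf.compare_files(['generated_file_1', 'generated_file_2'])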
def compare_files(ref_dir, work_root, files):
"""Check that all *files* in *work_root* match those in *ref_dir*.
If the environment variable :envvar:`GOLDEN_RUN` is set, the *files* in
*work_root* are copied to *ref_dir* to become the new reference.
"""
for f in files:
reference_file = os.path.join(ref_dir, f)
generated_file = os.path.join(work_root, f)
assert os.path.exists(generated_file)
if "GOLDEN_RUN" in os.environ:
shutil.copy(generated_file, reference_file)
with open(reference_file) as fref, open(generated_file) as fgen:
assert fref.read() == fgen.read(), f
def param_gen(paramtypes):
"""Generate dictionary of definitions in *paramtypes* list."""
defs = OrderedDict()
for paramtype in paramtypes:
for datatype in ["bool", "int", "str"]:
if datatype == "int":
default = 42
elif datatype == "str":
default = "hello"
else:
default = True
defs[paramtype + "_" + datatype] = {
"datatype": datatype,
"default": default,
"description": "",
"paramtype": paramtype,
}
return defs
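# For example, param_gen(['vlogparam']) returns an OrderedDict with the keys
# 'vlogparam_bool', 'vlogparam_int' and 'vlogparam_str'; the 'int' entry is:
#
#     {'datatype': 'int', 'default': 42, 'description': '', 'paramtype': 'vlogparam'}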
def _setup_backend(
name, tool, paramtypes, files, tool_options, work_root, use_vpi, toplevel
):
"""Set up a backend.
The backend is called *name*, is set up for *tool* with *tool_options*,
*paramtypes*, and, if *use_vpi* is ``True``, definitions from :attr:`VPI`.
If *files* is None, files are taken from :attr:`FILES`.
"""
parameters = param_gen(paramtypes)
_vpi = []
if use_vpi:
_vpi = VPI
for v in VPI:
for f in v["src_files"]:
_f = os.path.join(work_root, f)
if not os.path.exists(os.path.dirname(_f)):
os.makedirs(os.path.dirname(_f))
with open(_f, "a"):
os.utime(_f, None)
edam = {
"name": name,
"files": FILES if files is None else files,
"parameters": parameters,
"tool_options": {tool: tool_options},
"toplevel": toplevel,
"vpi": _vpi,
}
return get_edatool(tool)(edam=edam, work_root=work_root)
FILES = [
{"name": "qip_file.qip", "file_type": "QIP"},
{"name": "qsys_file", "file_type": "QSYS"},
{"name": "sdc_file", "file_type": "SDC"},
{"name": "bmm_file", "file_type": "BMM"},
{"name": "sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pcf_file.pcf", "file_type": "PCF"},
{"name": "ucf_file.ucf", "file_type": "UCF"},
{"name": "user_file", "file_type": "user"},
{"name": "tcl_file.tcl", "file_type": "tclSource"},
{"name": "waiver_file.waiver", "file_type": "waiver"},
{"name": "vlog_file.v", "file_type": "verilogSource"},
{"name": "vlog05_file.v", "file_type": "verilogSource-2005"},
{"name": "vlog_incfile", "file_type": "verilogSource", "is_include_file": True},
{"name": "vhdl_file.vhd", "file_type": "vhdlSource"},
{"name": "vhdl_lfile", "file_type": "vhdlSource", "logical_name": "libx"},
{"name": "vhdl2008_file", "file_type": "vhdlSource-2008"},
{"name": "xci_file.xci", "file_type": "xci"},
{"name": "xdc_file.xdc", "file_type": "xdc"},
{"name": "bootrom.mem", "file_type": "mem"},
{"name": "c_file.c", "file_type": "cSource"},
{"name": "cpp_file.cpp", "file_type": "cppSource"},
{"name": "c_header.h", "file_type": "cSource", "is_include_file": True},
{"name": "c_header.h", "file_type": "cppSource", "is_include_file": True},
{"name": "config.vbl", "file_type": "veribleLintRules"},
{"name": "verible_waiver.vbw", "file_type": "veribleLintWaiver"},
{"name": "verible_waiver2.vbw", "file_type": "veribleLintWaiver"},
{"name": "config.sby.j2", "file_type": "sbyConfigTemplate"},
{"name": "another_sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pdc_constraint_file.pdc", "file_type": "PDC"},
{"name": "qsf_constraint_file.qsf", "file_type": "QSF"},
{"name": "pdc_floorplan_constraint_file.pdc", "file_type": "FPPDC"},
{"name": "lpf_file.lpf", "file_type": "LPF"},
]
"""Files of all supported file types."""
VPI = [
{
"src_files": ["src/vpi_1/f1", "src/vpi_1/f3"],
"include_dirs": ["src/vpi_1/"],
"libs": ["some_lib"],
"name": "vpi1",
},
{"src_files": ["src/vpi_2/f4"], "include_dirs": [], "libs": [], "name": "vpi2"},
]
"""Predefined VPI modules to build."""
| 33.97541
| 84
| 0.602654
|
323e78e9c2f1f27943fa2cfa3516158f210c9e33
| 627
|
py
|
Python
|
helpers/db_test.py
|
thejeswi/BobGoesToJail
|
ac8a6e4242446634837d6166158fc5401c2818ac
|
[
"MIT"
] | 3
|
2018-08-20T14:14:01.000Z
|
2020-06-15T17:39:24.000Z
|
helpers/db_test.py
|
thejeswi/BobGoesToJail
|
ac8a6e4242446634837d6166158fc5401c2818ac
|
[
"MIT"
] | null | null | null |
helpers/db_test.py
|
thejeswi/BobGoesToJail
|
ac8a6e4242446634837d6166158fc5401c2818ac
|
[
"MIT"
] | 1
|
2020-06-15T17:39:26.000Z
|
2020-06-15T17:39:26.000Z
|
from pymongo import MongoClient
client = MongoClient()
client = MongoClient('localhost', 27017)
db = client['raw_law']
laws_db = db.laws
from filters.corpus2DB.schema import raw_law
def insert_law():
newLaw = raw_law()
newLaw.num = "15",
newLaw.title = "Law Title",
newLaw.text = ["This is a law_text"]
law_id = laws_db.insert_one(newLaw.out()).inserted_id
    print(law_id)
def find_law():
found_law = laws_db.find_one({"num":"15"})
    print(found_law)
def find_laws():
from pprint import pprint
found_laws = laws_db.find()
for law in found_laws:
pprint(law)
find_laws()
| 20.9
| 57
| 0.674641
|
bb2616edfa38a9263b89efd33043053520c3ae53
| 559
|
py
|
Python
|
moceansdk/exceptions/__init__.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
moceansdk/exceptions/__init__.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
moceansdk/exceptions/__init__.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
class MoceanErrorException(Exception):
def __init__(self, msg, error_response=None):
if error_response is not None:
super(MoceanErrorException, self).__init__(
error_response['err_msg'])
self._error_response = error_response
else:
            super(MoceanErrorException, self).__init__(msg)
            self._error_response = None
@property
def error_response(self):
return self._error_response
class RequiredFieldException(MoceanErrorException):
pass
class InvalidArgumentException(MoceanErrorException):
pass
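# Minimal usage sketch (the error payload below is made up for illustration):
#
#     try:
#         raise MoceanErrorException('fallback message', {'err_msg': 'invalid credentials'})
#     except MoceanErrorException as err:
#         print(err)                 # -> invalid credentials
#         print(err.error_response)  # -> {'err_msg': 'invalid credentials'}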
| 27.95
| 59
| 0.694097
|
fb8d9cc6746634508619bc782e723a37b1331f1d
| 81
|
py
|
Python
|
modtools/version.py
|
yash-agrawal/lapels
|
7aef03226c35ffa125c452d0c3f15f14da432d0d
|
[
"MIT"
] | 2
|
2015-06-21T18:41:49.000Z
|
2018-08-13T18:51:41.000Z
|
modtools/version.py
|
yash-agrawal/lapels
|
7aef03226c35ffa125c452d0c3f15f14da432d0d
|
[
"MIT"
] | 3
|
2015-06-21T18:47:12.000Z
|
2016-09-20T22:31:31.000Z
|
modtools/version.py
|
yash-agrawal/lapels
|
7aef03226c35ffa125c452d0c3f15f14da432d0d
|
[
"MIT"
] | null | null | null |
'''
Created on Oct 31, 2012
@author: Shunping Huang
'''
__mod_version__ = '0.1'
| 11.571429
| 23
| 0.666667
|
f5ebc8e1ab85f151c1fda77ace945aa55f9c9bf1
| 15,976
|
py
|
Python
|
utils/lib/netbox/ingest.py
|
mrlesmithjr/netbox
|
0e9ba9c21e1a054c7ac5b5d9abbb0817883e544c
|
[
"MIT"
] | 4
|
2020-05-10T12:13:02.000Z
|
2021-04-23T13:59:39.000Z
|
utils/lib/netbox/ingest.py
|
mrlesmithjr/netbox
|
0e9ba9c21e1a054c7ac5b5d9abbb0817883e544c
|
[
"MIT"
] | 1
|
2021-09-23T23:31:38.000Z
|
2021-09-23T23:31:38.000Z
|
utils/lib/netbox/ingest.py
|
mrlesmithjr/netbox
|
0e9ba9c21e1a054c7ac5b5d9abbb0817883e544c
|
[
"MIT"
] | null | null | null |
"""lib/netbox/ingest.py"""
class NetBoxIngest:
"""Main NetBox ingestion class"""
def __init__(self, netbox):
self.netbox_data = {}
self.netbox = netbox
def data(self):
"""Collect all relevant NetBox data"""
# DCIM
self.dcim_collections()
# Tenancy
self.tenancy_collections()
# IPAM
self.ipam_collections()
# Virtualization
self.virtualization_collections()
# Circuits
self.circuits_collections()
# Secrets
self.secrets_collections()
# Extras
self.extras_collections()
return self.netbox_data
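    # Sketch of how this class is typically driven (a minimal example; the
    # pynetbox client is an assumption -- any object exposing the same
    # .dcim/.tenancy/.ipam/... endpoint API works -- and the URL and token are
    # placeholders):
    #
    #     import pynetbox
    #     nb = pynetbox.api('https://netbox.example.com', token='<api-token>')
    #     netbox_data = NetBoxIngest(nb).data()
    #     netbox_data['netbox_sites']  # -> [{'data': {...}, 'state': 'present'}, ...]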
def dcim_collections(self):
"""Collect DCIM related info"""
self.regions()
self.sites()
self.rack_roles()
self.rack_groups()
self.racks()
self.manufacturers()
self.platforms()
self.device_types()
self.device_roles()
self.devices()
self.interfaces()
self.cables()
self.console_connections()
self.inventory_items()
def tenancy_collections(self):
"""Collect tenancy related info"""
self.tenant_groups()
self.tenants()
def ipam_collections(self):
"""Collect IPAM related info"""
self.roles()
self.vlan_groups()
self.vlans()
self.vrfs()
self.rirs()
self.aggs()
self.prefixes()
self.ip_addresses()
def virtualization_collections(self):
"""Collect virtualization related info"""
self.cluster_groups()
self.cluster_types()
self.clusters()
self.virtual_machines()
self.virtual_interfaces()
def circuits_collections(self):
"""Collect circuit related info"""
self.providers()
self.circuit_types()
self.circuits()
def extras_collections(self):
"""Collect extras related info"""
self.config_contexts()
def secrets_collections(self):
"""Collect secrets related info"""
self.secret_roles()
self.secrets()
def regions(self):
"""Returns all NetBox regions"""
netbox_regions = []
all_regions = self.netbox.dcim.regions.all()
for region in all_regions:
region_info = {'data': dict(region), 'state': 'present'}
netbox_regions.append(region_info)
self.netbox_data['netbox_regions'] = netbox_regions
def sites(self):
"""Returns all NetBox sites"""
netbox_sites = []
all_sites = self.netbox.dcim.sites.all()
for site in all_sites:
site_info = {'data': dict(site), 'state': 'present'}
netbox_sites.append(site_info)
self.netbox_data['netbox_sites'] = netbox_sites
def rack_roles(self):
"""Returns all NetBox rack roles"""
netbox_rack_roles = []
all_rack_roles = self.netbox.dcim.rack_roles.all()
for role in all_rack_roles:
role_info = {'data': dict(
role), 'state': 'present'}
netbox_rack_roles.append(role_info)
self.netbox_data['netbox_rack_roles'] = netbox_rack_roles
def rack_groups(self):
"""Returns all NetBox rack groups"""
netbox_rack_groups = []
all_rack_groups = self.netbox.dcim.rack_groups.all()
for group in all_rack_groups:
group_info = {'data': dict(
group), 'state': 'present'}
netbox_rack_groups.append(group_info)
self.netbox_data['netbox_rack_groups'] = netbox_rack_groups
def racks(self):
"""Returns all NetBox racks"""
netbox_racks = []
all_racks = self.netbox.dcim.racks.all()
for rack in all_racks:
rack_info = {'data': dict(
rack), 'state': 'present'}
netbox_racks.append(rack_info)
self.netbox_data['netbox_racks'] = netbox_racks
def manufacturers(self):
"""Returns all NetBox manufacturers"""
netbox_manufacturers = []
all_manufacturers = self.netbox.dcim.manufacturers.all()
for manufacturer in all_manufacturers:
manufacturer_info = {'data': dict(
manufacturer), 'state': 'present'}
netbox_manufacturers.append(manufacturer_info)
self.netbox_data['netbox_manufacturers'] = netbox_manufacturers
def platforms(self):
"""Returns all NetBox platforms"""
netbox_platforms = []
all_platforms = self.netbox.dcim.platforms.all()
for platform in all_platforms:
platform_info = {'data': dict(platform), 'state': 'present'}
netbox_platforms.append(platform_info)
self.netbox_data['netbox_platforms'] = netbox_platforms
def device_types(self):
"""Returns all NetBox device types"""
netbox_device_types = []
all_netbox_device_types = self.netbox.dcim.device_types.all()
for device_type in all_netbox_device_types:
device_type_info = {'data': dict(device_type), 'state': 'present'}
netbox_device_types.append(device_type_info)
self.netbox_data['netbox_device_types'] = netbox_device_types
def device_roles(self):
"""Returns all NetBox device roles"""
netbox_device_roles = []
all_device_roles = self.netbox.dcim.device_roles.all()
for role in all_device_roles:
role_info = {'data': dict(role), 'state': 'present'}
netbox_device_roles.append(role_info)
self.netbox_data['netbox_device_roles'] = netbox_device_roles
def devices(self):
"""Returns all NetBox devices"""
netbox_devices = []
all_devices = self.netbox.dcim.devices.all()
for device in all_devices:
device_info = {'data': dict(device), 'state': 'present'}
netbox_devices.append(device_info)
self.netbox_data['netbox_devices'] = netbox_devices
def interfaces(self):
"""Returns all NetBox interfaces"""
netbox_device_interfaces = []
all_interfaces = self.netbox.dcim.interfaces.all()
for interface in all_interfaces:
interface_info = {'data': dict(interface), 'state': 'present'}
netbox_device_interfaces.append(interface_info)
self.netbox_data['netbox_device_interfaces'] = netbox_device_interfaces
def cables(self):
"""Returns all NetBox cables"""
netbox_cables = []
all_cables = self.netbox.dcim.cables.all()
for cable in all_cables:
cable_info = {'data': dict(cable), 'state': 'present'}
netbox_cables.append(cable_info)
self.netbox_data['netbox_cables'] = netbox_cables
def console_connections(self):
"""Returns all NetBox console connections"""
netbox_console_connections = []
all_console_connections = self.netbox.dcim.console_connections.all()
for connection in all_console_connections:
connection_info = {'data': dict(connection), 'state': 'present'}
netbox_console_connections.append(connection_info)
self.netbox_data[
'netbox_console_connections'] = netbox_console_connections
def inventory_items(self):
"""Returns all NetBox inventory items"""
netbox_inventory_items = []
all_inventory_items = self.netbox.dcim.inventory_items.all()
for item in all_inventory_items:
item_info = {'data': dict(item), 'state': 'present'}
netbox_inventory_items.append(item_info)
self.netbox_data['netbox_inventory_items'] = netbox_inventory_items
def tenant_groups(self):
"""Returns all NetBox tenant groups"""
netbox_tenant_groups = []
all_tenant_groups = self.netbox.tenancy.tenant_groups.all()
for group in all_tenant_groups:
group_info = {'data': dict(group), 'state': 'present'}
netbox_tenant_groups.append(group_info)
self.netbox_data['netbox_tenant_groups'] = netbox_tenant_groups
def tenants(self):
"""Returns all NetBox tenants"""
netbox_tenants = []
all_tenants = self.netbox.tenancy.tenants.all()
for tenant in all_tenants:
tenant_info = {'data': dict(tenant), 'state': 'present'}
netbox_tenants.append(tenant_info)
self.netbox_data['netbox_tenants'] = netbox_tenants
def roles(self):
"""Returns all NetBox roles"""
netbox_ipam_roles = []
all_roles = self.netbox.ipam.roles.all()
for role in all_roles:
role_info = {'data': dict(role), 'state': 'present'}
netbox_ipam_roles.append(role_info)
self.netbox_data['netbox_ipam_roles'] = netbox_ipam_roles
def vlan_groups(self):
"""Returns all NetBox VLAN groups"""
netbox_vlan_groups = []
all_vlan_groups = self.netbox.ipam.vlan_groups.all()
for group in all_vlan_groups:
group_info = {'data': dict(group), 'state': 'present'}
netbox_vlan_groups.append(group_info)
self.netbox_data['netbox_vlan_groups'] = netbox_vlan_groups
def vlans(self):
"""Returns all NetBox VLANs"""
netbox_vlans = []
all_vlans = self.netbox.ipam.vlans.all()
for vlan in all_vlans:
vlan_info = {'data': dict(vlan), 'state': 'present'}
netbox_vlans.append(vlan_info)
self.netbox_data['netbox_vlans'] = netbox_vlans
def vrfs(self):
"""Returns all NetBox VRFs"""
netbox_vrfs = []
all_vrfs = self.netbox.ipam.vrfs.all()
for vrf in all_vrfs:
vrf_info = {'data': dict(vrf), 'state': 'present'}
netbox_vrfs.append(vrf_info)
self.netbox_data['netbox_vrfs'] = netbox_vrfs
def rirs(self):
"""Returns all NetBox RIRs"""
netbox_rirs = []
all_rirs = self.netbox.ipam.rirs.all()
for rir in all_rirs:
rir_info = {'data': dict(rir), 'state': 'present'}
netbox_rirs.append(rir_info)
self.netbox_data['netbox_rirs'] = netbox_rirs
def aggs(self):
"""Returns all NetBox aggregates"""
netbox_aggregates = []
all_aggs = self.netbox.ipam.aggregates.all()
for agg in all_aggs:
agg_info = {'data': dict(agg), 'state': 'present'}
netbox_aggregates.append(agg_info)
self.netbox_data['netbox_aggregates'] = netbox_aggregates
def prefixes(self):
"""Returns all NetBox prefixes"""
netbox_prefixes = []
all_prefixes = self.netbox.ipam.prefixes.all()
for prefix in all_prefixes:
prefix_info = {'data': dict(prefix), 'state': 'present'}
netbox_prefixes.append(prefix_info)
self.netbox_data['netbox_prefixes'] = netbox_prefixes
def ip_addresses(self):
"""Returns all NetBox IP addresses"""
netbox_ip_addresses = []
all_ip_addresses = self.netbox.ipam.ip_addresses.all()
for address in all_ip_addresses:
address_info = {'data': dict(address), 'state': 'present'}
netbox_ip_addresses.append(address_info)
self.netbox_data['netbox_ip_addresses'] = netbox_ip_addresses
def cluster_groups(self):
"""Returns all NetBox cluster groups"""
netbox_cluster_groups = []
all_cluster_groups = self.netbox.virtualization.cluster_groups.all()
for group in all_cluster_groups:
group_info = {'data': dict(group), 'state': 'present'}
netbox_cluster_groups.append(group_info)
self.netbox_data['netbox_cluster_groups'] = netbox_cluster_groups
def cluster_types(self):
"""Returns all NetBox cluster types"""
netbox_cluster_types = []
all_cluster_types = self.netbox.virtualization.cluster_types.all()
for cluster_type in all_cluster_types:
cluster_info = {'data': dict(cluster_type), 'state': 'present'}
netbox_cluster_types.append(cluster_info)
self.netbox_data['netbox_cluster_types'] = netbox_cluster_types
def clusters(self):
"""Returns all NetBox clusters"""
netbox_clusters = []
all_clusters = self.netbox.virtualization.clusters.all()
for cluster in all_clusters:
cluster_info = {'data': dict(cluster), 'state': 'present'}
netbox_clusters.append(cluster_info)
self.netbox_data['netbox_clusters'] = netbox_clusters
def virtual_machines(self):
"""Returns all NetBox virtual machines"""
netbox_virtual_machines = []
all_vms = self.netbox.virtualization.virtual_machines.all()
for virtual_machine in all_vms:
virtual_machine_info = {'data': dict(
virtual_machine), 'state': 'present'}
netbox_virtual_machines.append(virtual_machine_info)
self.netbox_data['netbox_virtual_machines'] = netbox_virtual_machines
def virtual_interfaces(self):
"""Returns all NetBox virtual machines"""
netbox_virtual_interfaces = []
all_virtual_interfaces = self.netbox.virtualization.interfaces.all()
for interface in all_virtual_interfaces:
interface_info = {'data': dict(
interface), 'state': 'present'}
netbox_virtual_interfaces.append(interface_info)
self.netbox_data['netbox_virtual_interfaces'] = netbox_virtual_interfaces
def providers(self):
"""Returns all NetBox circuit providers"""
netbox_providers = []
all_providers = self.netbox.circuits.providers.all()
for provider in all_providers:
provider_info = {'data': dict(
provider), 'state': 'present'}
netbox_providers.append(provider_info)
self.netbox_data['netbox_providers'] = netbox_providers
def circuit_types(self):
"""Returns all NetBox circuit types"""
netbox_circuit_types = []
all_circuit_types = self.netbox.circuits.circuit_types.all()
for circuit_type in all_circuit_types:
circuit_type_info = {'data': dict(
circuit_type), 'state': 'present'}
netbox_circuit_types.append(circuit_type_info)
self.netbox_data['netbox_circuit_types'] = netbox_circuit_types
def circuits(self):
"""Returns all NetBox circuits"""
netbox_circuits = []
all_circuit = self.netbox.circuits.circuits.all()
for circuit in all_circuit:
circuit_info = {'data': dict(
circuit), 'state': 'present'}
netbox_circuits.append(circuit_info)
self.netbox_data['netbox_circuits'] = netbox_circuits
def secret_roles(self):
"""Returns all NetBox secret roles"""
netbox_secret_roles = []
all_secret_roles = self.netbox.secrets.secret_roles.all()
for role in all_secret_roles:
role_info = {'data': dict(
role), 'state': 'present'}
netbox_secret_roles.append(role_info)
self.netbox_data['netbox_secret_roles'] = netbox_secret_roles
def secrets(self):
"""Returns all NetBox secrets"""
netbox_secrets = []
all_secrets = self.netbox.secrets.secrets.all()
for secret in all_secrets:
secret_info = {'data': dict(
secret), 'state': 'present'}
netbox_secrets.append(secret_info)
self.netbox_data['netbox_secrets'] = netbox_secrets
def config_contexts(self):
"""Returns all NetBox config contexts"""
netbox_config_contexts = []
all_config_contexts = self.netbox.extras.config_contexts.all()
for context in all_config_contexts:
context_info = {'data': dict(context), 'state': 'present'}
netbox_config_contexts.append(context_info)
self.netbox_data['netbox_config_contexts'] = netbox_config_contexts
| 35.740492
| 81
| 0.626377
|
4b2e27b7425b4b13f6fce76c24fbab59a396923d
| 4,333
|
py
|
Python
|
tensorforce/core/optimizers/tf_optimizer.py
|
perara/tensorforce
|
c0da10893d92f44c6fbbf482e511d2ccdc53147d
|
[
"Apache-2.0"
] | 1
|
2019-04-06T10:04:00.000Z
|
2019-04-06T10:04:00.000Z
|
tensorforce/core/optimizers/tf_optimizer.py
|
petrosgk/tensorforce
|
dd04f904acac78fd185ea8ee2c3ce6bac8859c1d
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/core/optimizers/tf_optimizer.py
|
petrosgk/tensorforce
|
dd04f904acac78fd185ea8ee2c3ce6bac8859c1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce.core.optimizers import Optimizer
class TFOptimizer(Optimizer):
"""
Wrapper class for TensorFlow optimizers.
"""
tf_optimizers = dict(
adadelta=tf.train.AdadeltaOptimizer,
adagrad=tf.train.AdagradOptimizer,
adam=tf.train.AdamOptimizer,
nadam=tf.contrib.opt.NadamOptimizer,
gradient_descent=tf.train.GradientDescentOptimizer,
momentum=tf.train.MomentumOptimizer,
rmsprop=tf.train.RMSPropOptimizer
)
@staticmethod
def get_wrapper(optimizer):
"""
Returns a TFOptimizer constructor callable for the given optimizer name.
Args:
optimizer: The name of the optimizer, one of 'adadelta', 'adagrad', 'adam', 'nadam',
'gradient_descent', 'momentum', 'rmsprop'.
Returns:
The TFOptimizer constructor callable.
"""
def wrapper(**kwargs):
return TFOptimizer(optimizer=optimizer, **kwargs)
return wrapper
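    # For example (illustrative only; 'learning_rate' is forwarded unchanged to
    # the underlying tf.train.AdamOptimizer constructor):
    #
    #     build_optimizer = TFOptimizer.get_wrapper('adam')
    #     optimizer = build_optimizer(learning_rate=1e-3)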
def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):
"""
Creates a new optimizer instance of a TensorFlow optimizer.
Args:
optimizer: The name of the optimizer, one of 'adadelta', 'adagrad', 'adam', 'nadam',
'gradient_descent', 'momentum', 'rmsprop'.
**kwargs: Additional arguments passed on to the TensorFlow optimizer constructor.
"""
self.optimizer_spec = optimizer
self.optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)
super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)
def tf_step(
self,
time,
variables,
arguments,
fn_loss,
**kwargs
):
"""
Creates the TensorFlow operations for performing an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
arguments: Dict of arguments for callables, like fn_loss.
fn_loss: A callable returning the loss of the current model.
**kwargs: Additional arguments, not used.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
loss = fn_loss(**arguments)
with tf.control_dependencies(control_inputs=(loss,)):
# Trivial operation to enforce control dependency
previous_variables = [variable + 0.0 for variable in variables]
with tf.control_dependencies(control_inputs=previous_variables):
applied = self.optimizer.minimize(loss=loss, var_list=variables) # colocate_gradients_with_ops=True
with tf.control_dependencies(control_inputs=(applied,)):
return [
variable - previous_variable
for variable, previous_variable in zip(variables, previous_variables)
]
def get_variables(self):
optimizer_variables = super(TFOptimizer, self).get_variables()
slots_variables = [
self.optimizer._slots[slot][key]
for slot in sorted(self.optimizer._slots)
for key in sorted(self.optimizer._slots[slot])
]
if self.optimizer_spec in ('adam', 'nadam'):
additional_variables = [self.optimizer._beta1_power, self.optimizer._beta2_power]
else:
additional_variables = list()
return optimizer_variables + slots_variables + additional_variables
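A minimal, hedged usage sketch of the wrapper factory above; TensorFlow 1.x is assumed and learning_rate is only an example keyword forwarded to tf.train.AdamOptimizer.
# Hedged sketch: build a wrapped Adam optimizer via the factory above.
build_adam = TFOptimizer.get_wrapper('adam')
adam = build_adam(learning_rate=1e-3)   # kwargs are passed straight to tf.train.AdamOptimizer
# adam.tf_step(...) can then be wired into a Tensorforce model's update graph.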
| 35.809917
| 112
| 0.649896
|
6575dc57538f9d81bb46603e7b09e4b3c091c775
| 870
|
py
|
Python
|
python_lib/setup.py
|
scorelab/bassa-client-libraries
|
a1a9d6c73d624646bdeabc5ffeb8c8ad28957887
|
[
"Apache-2.0"
] | 7
|
2020-05-20T15:35:54.000Z
|
2020-06-20T08:18:41.000Z
|
python_lib/setup.py
|
scorelab/BassaClient
|
a1a9d6c73d624646bdeabc5ffeb8c8ad28957887
|
[
"Apache-2.0"
] | 12
|
2020-05-23T08:50:13.000Z
|
2020-06-28T09:01:58.000Z
|
python_lib/setup.py
|
scorelab/bassa-client-libraries
|
a1a9d6c73d624646bdeabc5ffeb8c8ad28957887
|
[
"Apache-2.0"
] | 2
|
2020-05-20T15:35:56.000Z
|
2020-05-21T20:47:48.000Z
|
import setuptools
def readme():
with open('README.md') as f:
README = f.read()
return README
setuptools.setup(
name="bassa-kmehant",
version="1.0.0-alpha.1",
author="Mehant Kammakomati",
author_email="kmehant@scorelab.org",
description="Python Client Library for the Bassa Project",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/scorelab/BassaClient",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: Apache 2.0 License",
"Operating System :: OS Independent",
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"Topic :: Software Development :: Libraries",
],
python_requires='>=3.6',
)
| 28.064516
| 62
| 0.63908
|
e2f548475bbe7b7c32e2650c462d529df5e4b1bb
| 5,003
|
py
|
Python
|
test/mapping/who2016_child_mapping.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-23T12:57:47.000Z
|
2020-04-18T17:13:08.000Z
|
test/mapping/who2016_child_mapping.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-09T22:10:07.000Z
|
2022-02-16T04:57:06.000Z
|
test/mapping/who2016_child_mapping.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 11
|
2018-12-11T22:01:13.000Z
|
2022-01-07T11:38:02.000Z
|
MAPPING = [
{
'sid': 'Mother alive',
'symptom': 's7',
'Id10356': 'yes'
},
{
'sid': 'Baby cried',
'symptom': 's17',
'Id10104': 'yes'
},
{
'sid': 'Baby moved',
'symptom': 's18',
'Id10109': 'yes'
},
{
'sid': 'Baby breathed',
'symptom': 's19',
'Id10110': 'yes'
},
# { # Not on short form
# 'sid': 'Fever continue until death',
# 'symptom': 's112',
# 'Id10149': 'yes'
# },
{
'sid': 'Diarrhea',
'symptom': 's115',
'Id10181': 'yes'
},
{
'sid': 'Diarrhea continue until death',
'symptom': 's118',
'Id10185': 'yes'
},
# { # Not on short form
# 'sid': 'Blood in stool',
# 'symptom': 's120',
# 'Id10186': 'yes'
# },
{
'sid': 'Cough',
'symptom': 's121',
'Id10153': 'yes'
},
{
'sid': 'Severe cough',
'symptom': 's123',
'Id10156': 'yes'
},
# No corresponding data for s124: Vomit after cough
# { # Not on short form
# 'sid': 'Vomit after cough',
# 'symptom': 's124',
# 'child_4_15': 'yes'
# },
{
'sid': 'Difficulty breathing',
'symptom': 's125',
'Id10159': 'yes'
},
{
'sid': 'Fast breathing',
'symptom': 's127',
'Id10166': 'yes'
},
{
'sid': 'Indrawing chest',
'symptom': 's129',
'Id10172': 'yes'
},
# See Id10173_nc for multi-select
# {
# 'sid': 'Stridor',
# 'symptom': 's130',
# 'child_4_22': 'yes'
# },
# {
# 'sid': 'Grunting',
# 'symptom': 's131',
# 'child_4_23': 'yes'
# },
# {
# 'sid': 'Wheezing',
# 'symptom': 's132',
# 'child_4_24': 'yes'
# },
{
'sid': 'Convulsions',
'symptom': 's133',
'Id10220': 'yes'
},
{
'sid': 'Unconsciousness',
'symptom': 's134',
'Id10214': 'yes'
},
{
'sid': 'Stiff neck',
'symptom': 's136',
'Id10208': 'yes'
},
{
'sid': 'Bulging fontanelle',
'symptom': 's137',
'Id10278': 'yes'
},
{
'sid': 'Skin rash',
'symptom': 's138',
'Id10233': 'yes'
},
# No corresponding data for s143: Rash had blisters
# Could be Id10236?
# {
# 'sid': 'Rash had blisters',
# 'symptom': 's143',
# 'child_4_34': 'yes'
# },
# No corresponding data for s144: Limbs became thin
# {
# 'sid': 'Limbs became thin',
# 'symptom': 's144',
# 'child_4_35': 'yes'
# },
# { # Not on short form
# 'sid': 'Swollen legs',
# 'symptom': 's145',
# 'Id10249': 'yes'
# },
{
'sid': 'Skin flake off in patches',
'symptom': 's147',
'Id10238': 'yes'
},
{
'sid': 'Hair change to reddish or yellowish color',
'symptom': 's148',
'Id10267': 'yes'
},
{
'sid': 'Protruding belly',
'symptom': 's149',
'Id10200': 'yes'
},
{
'sid': 'Pallor',
'symptom': 's150',
'Id10268': 'yes'
},
{
'sid': 'Swelling in armpits',
'symptom': 's151',
'Id10256': 'yes'
},
# { # Not on short form
# 'sid': 'Whitish rash in mouth',
# 'symptom': 's152',
# 'Id10245': 'yes'
# },
{
'sid': 'Bleeding',
'symptom': 's153',
'Id10241': 'yes'
},
{
'sid': 'Skin turned black',
'symptom': 's154',
'Id10239': 'yes'
},
{
'sid': 'Intentionally inflicted injury',
'symptom': 's165',
'Id10100': 'yes'
},
{
'sid': 'Mother ever tested for HIV',
'symptom': 's188',
'Id10445': 'yes'
},
# No corresponding data for s189: Mother tested positive for HIV
# {
# 'sid': 'Mother tested positive for HIV',
# 'symptom': 's189',
# 'child_5_18': 'yes'
# },
{
'sid': 'Mother told she had HIV by health worker',
'symptom': 's190',
'Id10446': 'yes'
},
{
'sid': 'Free text: abdomen',
'symptom': 's99991',
'Id10478': 'abdomen',
},
{
'sid': 'Free text: cancer',
'symptom': 's99999',
'Id10478': 'cancer dehydration',
},
{
'sid': 'Free text: dehydration',
'symptom': 's999914',
'Id10478': 'cancer dehydration',
},
{
'sid': 'Free text: dengue',
'symptom': 's999915',
'Id10478': 'dengue diarrhea fever',
},
{
'sid': 'Free text: diarrhea',
'symptom': 's999916',
'Id10478': 'dengue diarrhea fever',
},
{
'sid': 'Free text: fever',
'symptom': 's999919',
'Id10478': 'dengue diarrhea fever',
},
]
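A small, hedged sketch of how the MAPPING list above can be consumed; the variable names are illustrative only, and the expected values come directly from the entries listed above.
# Hedged sketch: index the mapping by SmartVA symptom code.
symptom_index = {entry['symptom']: entry for entry in MAPPING}
diarrhea = symptom_index['s115']   # {'sid': 'Diarrhea', 'symptom': 's115', 'Id10181': 'yes'}
who_keys = [k for k in diarrhea if k.startswith('Id10')]   # ['Id10181']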
| 22.137168
| 68
| 0.41635
|
14ee41fa6822d462a533f523294d0ef5681c903c
| 9,790
|
py
|
Python
|
docs/conf.py
|
joaopfonseca/ml-research
|
a2a063e341010397bd13df812109f31ce05ac9f7
|
[
"MIT"
] | 1
|
2021-12-13T09:27:06.000Z
|
2021-12-13T09:27:06.000Z
|
docs/conf.py
|
joaopfonseca/research
|
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
|
[
"MIT"
] | 20
|
2021-12-10T11:54:59.000Z
|
2022-03-18T17:55:33.000Z
|
docs/conf.py
|
joaopfonseca/research
|
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# research documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx_material
from recommonmark.transform import AutoStructify
FORCE_CLASSIC = os.environ.get("SPHINX_MATERIAL_FORCE_CLASSIC", False)
FORCE_CLASSIC = FORCE_CLASSIC in ("1", "true")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.pardir))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"numpydoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.imgmath",
"nbsphinx",
"recommonmark",
"sphinx_markdown_tables",
"sphinx_copybutton",
]
autosummary_generate = True
autoclass_content = "class"
autodoc_default_flags = ["members", "inherited-members"]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
# numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = "True"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "mlresearch"
from datetime import datetime
copyright = f"{datetime.now().year}, João Fonseca"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from mlresearch import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_templates"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_material"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Set the name of the project to appear in the navigation.
"nav_title": "ML-Research",
"nav_links": [],
"heroes": {
"index": "Implementation of Machine Learning algorithms, experiments and utilities.",
},
# Specify a base_url used to generate sitemap.xml.
# 'base_url': 'https://project.github.io/project',
# Set the color and the accent color
"color_primary": "blue",
"color_accent": "light-blue",
# Set the repo location to get a badge with stats
"repo_url": "https://github.com/joaopfonseca/ml-research/",
"repo_name": "ml-research",
# Icon codes: https://codepen.io/btn-ninja/pen/YrXmax
"logo_icon": "",
# Visible levels of the global TOC; -1 means unlimited
"globaltoc_depth": 2,
# If False, expand all TOC entries
"globaltoc_collapse": True,
# If True, show hidden TOC entries
"globaltoc_includehidden": False,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "ml-research"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_templates/logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "researchdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "research.tex", "ml-research Documentation", "joaopfonseca", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "ml-research", "ml-research Documentation", ["joaopfonseca"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"ml-research",
"ml-research Documentation",
"joaopfonseca",
"ml-research",
"A short description of the project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.993464
| 93
| 0.703779
|
96263cbb0e9bd5a42cb3f8a946aab1e010b729bc
| 2,888
|
py
|
Python
|
scipy/fftpack/__init__.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | 2
|
2015-10-30T10:04:46.000Z
|
2017-03-11T00:58:21.000Z
|
scipy/fftpack/__init__.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/fftpack/__init__.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
==================================================
Discrete Fourier transforms (:mod:`scipy.fftpack`)
==================================================
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - Two dimensional FFT
ifft2 - Two dimensional inverse FFT
fftn - n-dimensional FFT
ifftn - n-dimensional inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
Differential and pseudo-differential operators
==============================================
.. autosummary::
:toctree: generated/
diff - Differentiation and integration of periodic sequences
tilbert - Tilbert transform: cs_diff(x,h,h)
itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
hilbert - Hilbert transform: cs_diff(x,inf,inf)
ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
cs_diff - cosh/sinh pseudo-derivative of periodic sequences
sc_diff - sinh/cosh pseudo-derivative of periodic sequences
ss_diff - sinh/sinh pseudo-derivative of periodic sequences
cc_diff - cosh/cosh pseudo-derivative of periodic sequences
shift - Shift periodic sequences
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
Convolutions (:mod:`scipy.fftpack.convolve`)
============================================
.. module:: scipy.fftpack.convolve
.. autosummary::
:toctree: generated/
convolve
convolve_z
init_convolution_kernel
destroy_convolve_cache
Other (:mod:`scipy.fftpack._fftpack`)
=====================================
.. module:: scipy.fftpack._fftpack
.. autosummary::
:toctree: generated/
drfft
zfft
zrfft
zfftnd
destroy_drfft_cache
destroy_zfft_cache
destroy_zfftnd_cache
"""
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2',
'diff',
'tilbert','itilbert','hilbert','ihilbert',
'sc_diff','cs_diff','cc_diff','ss_diff',
'shift',
'rfftfreq'
]
from fftpack_version import fftpack_version as __version__
from basic import *
from pseudo_diffs import *
from helper import *
from numpy.dual import register_func
for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']:
register_func(k, eval(k))
del k, register_func
from realtransforms import *
__all__.extend(['dct', 'idct', 'dst', 'idst'])
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
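A brief, hedged usage example of the transforms documented above; NumPy is assumed and only the standard scipy.fftpack call signatures are used.
import numpy as np
from scipy.fftpack import fft, ifft
x = np.random.rand(8)
X = fft(x)                       # forward transform
assert np.allclose(ifft(X), x)   # the inverse recovers the signal up to rounding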
| 26.254545
| 78
| 0.634003
|
c8996ae4229ee74a69510d5c06e8001344d093f6
| 4,684
|
py
|
Python
|
doc2dash/parsers/sphinx.py
|
ofan/doc2dash
|
6691678b9f86e6c357d4257d6634f9e29f6f087d
|
[
"MIT"
] | null | null | null |
doc2dash/parsers/sphinx.py
|
ofan/doc2dash
|
6691678b9f86e6c357d4257d6634f9e29f6f087d
|
[
"MIT"
] | 2
|
2021-03-31T19:56:06.000Z
|
2021-12-13T20:45:07.000Z
|
doc2dash/parsers/sphinx.py
|
ofan/doc2dash
|
6691678b9f86e6c357d4257d6634f9e29f6f087d
|
[
"MIT"
] | null | null | null |
import errno
import logging
import os
import re
from bs4 import BeautifulSoup
from . import types
from .base import _BaseParser
log = logging.getLogger(__name__)
class SphinxParser(_BaseParser):
"""Parser for Sphinx-based documenation: Python, Django, Pyramid..."""
name = 'sphinx'
DETECT_FILE = '_static/searchtools.js'
DETECT_PATTERN = '* Sphinx JavaScript util'
def parse(self):
"""Parse sphinx docs at *path*.
yield tuples of symbol name, type and path
"""
for idx in POSSIBLE_INDEXES:
try:
soup = BeautifulSoup(open(os.path.join(self.docpath, idx)),
'lxml')
break
except IOError:
pass
else:
raise IOError(errno.ENOENT, 'Essential index file not found.')
for t in _parse_soup(soup):
yield t
def find_and_patch_entry(self, soup, entry):
"""Modify soup so dash can generate TOCs on the fly."""
link = soup.find('a', {'class': 'headerlink'}, href='#' + entry.anchor)
tag = soup.new_tag('a')
tag['name'] = self.APPLE_REF.format(entry.type, entry.name)
if link:
link.parent.insert(0, tag)
return True
elif entry.anchor.startswith('module-'):
soup.h1.parent.insert(0, tag)
return True
else:
return False
POSSIBLE_INDEXES = [
'genindex-all.html',
'genindex.html',
]
def _parse_soup(soup):
log.info('Creating database...')
for table in soup('table', {'class': 'genindextable'}):
for td in table('td'):
for dl in td('dl', recursive=False):
for dt in dl('dt', recursive=False):
if not dt.a:
continue
type_, name = _get_type_and_name(dt.a.string)
if name:
href = dt.a['href']
tmp_name = _url_to_name(href, type_)
if not tmp_name.startswith('index-'):
yield tmp_name, type_, href
else:
name = _strip_annotation(dt.a.string)
dd = dt.next_sibling.next_sibling
if dd and dd.name == 'dd':
for y in _process_dd(name, dd):
yield y
RE_ANNO = re.compile(r'(.+) \(.*\)')
def _strip_annotation(text):
"""Transforms 'foo (class in bar)' to 'foo'."""
m = RE_ANNO.match(text)
if m:
return m.group(1)
else:
return text.strip()
def _url_to_name(url, type_):
"""Certain types have prefixes in names we have to strip before adding."""
if type_ == types.PACKAGE or type_ == types.CONSTANT and 'opcode-' in url:
return url.split('#')[1][7:]
else:
return url.split('#')[1]
def _process_dd(name, dd):
"""Process a <dd> block as used by Sphinx on multiple symbols/name.
All symbols inherit the *name* of the first.
"""
for dt in dd('dt'):
text = dt.text.strip()
type_ = _get_type(text)
if type_:
if type_ == _IN_MODULE:
type_ = _guess_type_by_name(name)
full_name = _url_to_name(dt.a['href'], type_)
if not full_name.startswith('index-'):
yield full_name, type_, dt.a['href']
def _guess_type_by_name(name):
"""Module level functions and constants are not distinguishable."""
if name.endswith('()'):
return types.FUNCTION
else:
return types.CONSTANT
def _get_type(text):
return _get_type_and_name(text)[0]
_IN_MODULE = '_in_module'
TYPE_MAPPING = [
(re.compile(r'(.*)\(\S+ method\)$'), types.METHOD),
(re.compile(r'(.*)\(.*function\)$'), types.FUNCTION),
(re.compile(r'(.*)\(\S+ attribute\)$'), types.ATTRIBUTE),
(re.compile(r'(.*)\(\S+ member\)$'), types.ATTRIBUTE),
(re.compile(r'(.*)\(class in \S+\)$'), types.CLASS),
(re.compile(r'(.*)\(built-in class\)$'), types.CLASS),
(re.compile(r'(.*)\(built-in variable\)$'), types.CONSTANT),
(re.compile(r'(.*)\(module\)$'), types.PACKAGE),
(re.compile(r'(.*)\(opcode\)$'), types.CONSTANT),
(re.compile(r'(.*)\(in module \S+\)$'), _IN_MODULE),
]
def _get_type_and_name(text):
for mapping in TYPE_MAPPING:
match = mapping[0].match(text)
if match:
name = match.group(1).strip()
type_ = mapping[1]
if type_ == _IN_MODULE and name:
type_ = _guess_type_by_name(name)
return type_, name
else:
return None, None
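Two small, hedged examples of the helper functions above; the expected results follow directly from RE_ANNO and TYPE_MAPPING, and types.CLASS is the constant imported from the package's types module.
_strip_annotation('foo (class in bar)')            # -> 'foo'
_get_type_and_name('Parser (class in doc2dash)')   # -> (types.CLASS, 'Parser')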
| 29.093168
| 79
| 0.54526
|
0dccd1c415b1d2bd1cd969fa2f6091e843b0a658
| 3,835
|
py
|
Python
|
misc/config_tools/scenario_config/ivshmem_cfg_h.py
|
donsheng/acrn-hypervisor
|
79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24
|
[
"BSD-3-Clause"
] | 2
|
2020-09-14T06:48:39.000Z
|
2020-10-12T14:21:01.000Z
|
misc/config_tools/scenario_config/ivshmem_cfg_h.py
|
donsheng/acrn-hypervisor
|
79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24
|
[
"BSD-3-Clause"
] | 4
|
2021-11-09T10:08:28.000Z
|
2021-12-30T06:13:44.000Z
|
misc/config_tools/scenario_config/ivshmem_cfg_h.py
|
donsheng/acrn-hypervisor
|
79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24
|
[
"BSD-3-Clause"
] | 2
|
2019-03-25T02:54:20.000Z
|
2019-03-27T04:06:21.000Z
|
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import common
import scenario_cfg_lib
import board_cfg_lib
IVSHMEM_HEADER_DEFINE = scenario_cfg_lib.HEADER_LICENSE + r"""
#ifndef IVSHMEM_CFG_H
#define IVSHMEM_CFG_H
"""
IVSHMEM_END_DEFINE = r"""#endif /* IVSHMEM_CFG_H */"""
def gen_common_header(config):
"""
    Write the common header of ivshmem_cfg.h.
    :param config: the file object to write to
:return: None
"""
print("{0}".format(IVSHMEM_HEADER_DEFINE), file=config)
def write_shmem_regions(config):
raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
shmem_regions = []
shmem_dev_num = 0
for raw_shm in raw_shmem_regions:
if raw_shm is None or raw_shm.strip() == '':
continue
raw_shm_splited = raw_shm.split(',')
if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
shmem_regions.append((raw_shm_splited[0].strip(), raw_shm_splited[1].strip(), raw_shm_splited[2].strip().split(':')))
shmem_dev_num += len(raw_shm_splited[2].strip().split(':'))
if len(shmem_regions) > 0:
shmem_cnt = 0
print("", file=config)
for shmem_region in shmem_regions:
print("#define IVSHMEM_SHM_REGION_%d\t"%shmem_cnt, end="", file=config)
print('"{}"'.format(shmem_region[0]), file=config)
shmem_cnt += 1
print("", file=config)
print("/*", file=config)
print(" * The IVSHMEM_SHM_SIZE is the sum of all memory regions.", file=config)
print(" * The size range of each memory region is [2MB, 512MB] and is a power of 2.", file=config)
print(" */", file=config)
total_shm_size = 0
if len(shmem_regions) > 0:
for shmem_region in shmem_regions:
int_size = 0
size = shmem_region[1]
try:
int_size = int(size) * 0x100000
except Exception as e:
                print('invalid shm size format: ', str(e))
total_shm_size += int_size
print("#define IVSHMEM_SHM_SIZE\t{}UL".format(hex(total_shm_size)), file=config)
print("#define IVSHMEM_DEV_NUM\t\t{}UL".format(shmem_dev_num), file=config)
print("", file=config)
print("/* All user defined memory regions */", file=config)
if len(shmem_regions) == 0:
print("#define IVSHMEM_SHM_REGIONS", file=config)
else:
print("#define IVSHMEM_SHM_REGIONS \\", file=config)
shmem_cnt = 0
for shmem in shmem_regions:
print("\t{ \\", file=config)
print('\t\t.name = IVSHMEM_SHM_REGION_{}, \\'.format(shmem_cnt), file=config)
try:
int_size = int(shmem[1]) * 0x100000
except:
int_size = 0
print('\t\t.size = {}UL,\t\t/* {}M */ \\'.format(hex(int_size), shmem[1]), file=config)
if shmem_cnt < len(shmem_regions) - 1:
print("\t}, \\", file=config)
else:
print("\t},", file=config)
shmem_cnt += 1
print("", file=config)
def generate_file(scenario_items, config):
"""
Start to generate ivshmem_cfg.h
    :param scenario_items: the object containing all user setting information
    :param config: the file object to write the scenario information to
"""
vm_info = scenario_items['vm']
gen_common_header(config)
if vm_info.shmem.shmem_enabled == 'y':
print("#include <ivshmem.h>", file=config)
print("#include <asm/pgtable.h>", file=config)
write_shmem_regions(config)
print("{0}".format(IVSHMEM_END_DEFINE), file=config)
| 37.598039
| 129
| 0.619296
|
33d09199ec422681a4315ded2400ab8c12ced50e
| 575
|
py
|
Python
|
engine/date_tools.py
|
gabrii/TrendsEngine
|
afe141704421aaac999cda8c3292fb418fb86cfa
|
[
"MIT"
] | null | null | null |
engine/date_tools.py
|
gabrii/TrendsEngine
|
afe141704421aaac999cda8c3292fb418fb86cfa
|
[
"MIT"
] | null | null | null |
engine/date_tools.py
|
gabrii/TrendsEngine
|
afe141704421aaac999cda8c3292fb418fb86cfa
|
[
"MIT"
] | null | null | null |
from datetime import datetime, date
def from_timestamp(timestamp: int) -> datetime:
    """Returns a UTC datetime from a POSIX timestamp."""
    return datetime.utcfromtimestamp(timestamp)
def isocalendar(_date: date) -> (int, int, int):
    """Wrapper for date.isocalendar(). Returns a tuple of (year, week, weekday).
    The week comes from the ISO-8601 calendar, not Gregorian calendar weeks.
    This way each week has the same number of days.
    """
    year, week, day = _date.isocalendar()
    return year, week, day
def date_suffix(_date: date) -> str:
return "{}:{}".format(*_date.isocalendar())
| 28.75
| 72
| 0.673043
|
6b7406e8e3619d94aee96dbff9cdf76be604a9f7
| 610
|
py
|
Python
|
utils_components/string.py
|
BhasherBEL/ProgrammingChallenges
|
e697c7f7e3d8177b9ee615918f3c78b645b927d0
|
[
"MIT"
] | null | null | null |
utils_components/string.py
|
BhasherBEL/ProgrammingChallenges
|
e697c7f7e3d8177b9ee615918f3c78b645b927d0
|
[
"MIT"
] | 1
|
2020-12-09T12:00:56.000Z
|
2020-12-09T12:00:56.000Z
|
utils_components/string.py
|
BhasherBEL/ProgrammingChallenges
|
e697c7f7e3d8177b9ee615918f3c78b645b927d0
|
[
"MIT"
] | 1
|
2020-12-09T11:38:49.000Z
|
2020-12-09T11:38:49.000Z
|
class String:
@staticmethod
def levenshtein_distance(w1: str, w2: str) -> int:
if len(w1) == 0 or len(w2) == 0:
return max(len(w1), len(w2))
if w1[0] == w2[0]:
return String.levenshtein_distance(w1[1:], w2[1:])
return 1 + min(
String.levenshtein_distance(w1[1:], w2),
String.levenshtein_distance(w1, w2[1:]),
String.levenshtein_distance(w1[1:], w2[1:]),
)
@staticmethod
def difference_count(w1: str, w2: str) -> int:
return sum([l1 != l2 for l1, l2 in zip(w1, w2)]) + abs(len(w1) - len(w2))
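A brief, hedged usage example of the two static helpers above.
String.levenshtein_distance('kitten', 'sitting')   # 3
String.difference_count('kitten', 'sitting')       # 3 (two mismatches plus one extra character)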
| 32.105263
| 81
| 0.540984
|
a6eb5a3372bbaf375f66e66c6814e8a325377e0a
| 8,722
|
py
|
Python
|
proxy/http/websocket.py
|
certusone/proxy-model.py
|
9b6babbc4b78cc8e51e7286ea27ad724662c4f0f
|
[
"BSD-3-Clause"
] | 4
|
2021-09-10T21:29:27.000Z
|
2021-11-17T01:43:58.000Z
|
proxy/http/websocket.py
|
certusone/proxy-model.py
|
9b6babbc4b78cc8e51e7286ea27ad724662c4f0f
|
[
"BSD-3-Clause"
] | 339
|
2021-04-28T06:38:54.000Z
|
2022-03-31T11:16:36.000Z
|
proxy/http/websocket.py
|
certusone/proxy-model.py
|
9b6babbc4b78cc8e51e7286ea27ad724662c4f0f
|
[
"BSD-3-Clause"
] | 5
|
2021-08-17T14:30:28.000Z
|
2022-01-18T07:19:47.000Z
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import hashlib
import base64
import selectors
import struct
import socket
import secrets
import ssl
import ipaddress
import logging
import io
from typing import TypeVar, Type, Optional, NamedTuple, Union, Callable
from .parser import httpParserTypes, HttpParser
from ..common.constants import DEFAULT_BUFFER_SIZE
from ..common.utils import new_socket_connection, build_websocket_handshake_request
from ..core.connection import tcpConnectionTypes, TcpConnection
WebsocketOpcodes = NamedTuple('WebsocketOpcodes', [
('CONTINUATION_FRAME', int),
('TEXT_FRAME', int),
('BINARY_FRAME', int),
('CONNECTION_CLOSE', int),
('PING', int),
('PONG', int),
])
websocketOpcodes = WebsocketOpcodes(0x0, 0x1, 0x2, 0x8, 0x9, 0xA)
V = TypeVar('V', bound='WebsocketFrame')
logger = logging.getLogger(__name__)
class WebsocketFrame:
"""Websocket frames parser and constructor."""
GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def __init__(self) -> None:
self.fin: bool = False
self.rsv1: bool = False
self.rsv2: bool = False
self.rsv3: bool = False
self.opcode: int = 0
self.masked: bool = False
self.payload_length: Optional[int] = None
self.mask: Optional[bytes] = None
self.data: Optional[bytes] = None
@classmethod
def text(cls: Type[V], data: bytes) -> bytes:
frame = cls()
frame.fin = True
frame.opcode = websocketOpcodes.TEXT_FRAME
frame.data = data
return frame.build()
def reset(self) -> None:
self.fin = False
self.rsv1 = False
self.rsv2 = False
self.rsv3 = False
self.opcode = 0
self.masked = False
self.payload_length = None
self.mask = None
self.data = None
def parse_fin_and_rsv(self, byte: int) -> None:
self.fin = bool(byte & 1 << 7)
self.rsv1 = bool(byte & 1 << 6)
self.rsv2 = bool(byte & 1 << 5)
self.rsv3 = bool(byte & 1 << 4)
self.opcode = byte & 0b00001111
def parse_mask_and_payload(self, byte: int) -> None:
self.masked = bool(byte & 0b10000000)
self.payload_length = byte & 0b01111111
def build(self) -> bytes:
if self.payload_length is None and self.data:
self.payload_length = len(self.data)
raw = io.BytesIO()
raw.write(
struct.pack(
'!B',
(1 << 7 if self.fin else 0) |
(1 << 6 if self.rsv1 else 0) |
(1 << 5 if self.rsv2 else 0) |
(1 << 4 if self.rsv3 else 0) |
self.opcode
))
assert self.payload_length is not None
if self.payload_length < 126:
raw.write(
struct.pack(
'!B',
(1 << 7 if self.masked else 0) | self.payload_length
)
)
elif self.payload_length < 1 << 16:
raw.write(
struct.pack(
'!BH',
(1 << 7 if self.masked else 0) | 126,
self.payload_length
)
)
elif self.payload_length < 1 << 64:
raw.write(
struct.pack(
'!BHQ',
(1 << 7 if self.masked else 0) | 127,
self.payload_length
)
)
else:
raise ValueError(f'Invalid payload_length { self.payload_length },'
f'maximum allowed { 1 << 64 }')
if self.masked and self.data:
mask = secrets.token_bytes(4) if self.mask is None else self.mask
raw.write(mask)
raw.write(self.apply_mask(self.data, mask))
elif self.data:
raw.write(self.data)
return raw.getvalue()
def parse(self, raw: bytes) -> bytes:
cur = 0
self.parse_fin_and_rsv(raw[cur])
cur += 1
self.parse_mask_and_payload(raw[cur])
cur += 1
if self.payload_length == 126:
data = raw[cur: cur + 2]
self.payload_length, = struct.unpack('!H', data)
cur += 2
elif self.payload_length == 127:
data = raw[cur: cur + 8]
self.payload_length, = struct.unpack('!Q', data)
cur += 8
if self.masked:
self.mask = raw[cur: cur + 4]
cur += 4
assert self.payload_length
self.data = raw[cur: cur + self.payload_length]
cur += self.payload_length
if self.masked:
assert self.mask is not None
self.data = self.apply_mask(self.data, self.mask)
return raw[cur:]
@staticmethod
def apply_mask(data: bytes, mask: bytes) -> bytes:
raw = bytearray(data)
for i in range(len(raw)):
raw[i] = raw[i] ^ mask[i % 4]
return bytes(raw)
@staticmethod
def key_to_accept(key: bytes) -> bytes:
sha1 = hashlib.sha1()
sha1.update(key + WebsocketFrame.GUID)
return base64.b64encode(sha1.digest())
class WebsocketClient(TcpConnection):
def __init__(self,
hostname: Union[ipaddress.IPv4Address, ipaddress.IPv6Address],
port: int,
path: bytes = b'/',
on_message: Optional[Callable[[WebsocketFrame], None]] = None) -> None:
super().__init__(tcpConnectionTypes.CLIENT)
self.hostname: Union[ipaddress.IPv4Address,
ipaddress.IPv6Address] = hostname
self.port: int = port
self.path: bytes = path
self.sock: socket.socket = new_socket_connection(
(str(self.hostname), self.port))
self.on_message: Optional[Callable[[
WebsocketFrame], None]] = on_message
self.upgrade()
self.sock.setblocking(False)
self.selector: selectors.DefaultSelector = selectors.DefaultSelector()
@property
def connection(self) -> Union[ssl.SSLSocket, socket.socket]:
return self.sock
def upgrade(self) -> None:
key = base64.b64encode(secrets.token_bytes(16))
self.sock.send(build_websocket_handshake_request(key, url=self.path))
response = HttpParser(httpParserTypes.RESPONSE_PARSER)
response.parse(self.sock.recv(DEFAULT_BUFFER_SIZE))
accept = response.header(b'Sec-Websocket-Accept')
assert WebsocketFrame.key_to_accept(key) == accept
def ping(self, data: Optional[bytes] = None) -> None:
pass
def pong(self, data: Optional[bytes] = None) -> None:
pass
def shutdown(self, _data: Optional[bytes] = None) -> None:
"""Closes connection with the server."""
super().close()
def run_once(self) -> bool:
ev = selectors.EVENT_READ
if self.has_buffer():
ev |= selectors.EVENT_WRITE
self.selector.register(self.sock.fileno(), ev)
events = self.selector.select(timeout=1)
self.selector.unregister(self.sock)
for _, mask in events:
if mask & selectors.EVENT_READ and self.on_message:
raw = self.recv()
if raw is None or raw.tobytes() == b'':
self.closed = True
logger.debug('Websocket connection closed by server')
return True
frame = WebsocketFrame()
# TODO(abhinavsingh): Remove .tobytes after parser is
# memoryview compliant
frame.parse(raw.tobytes())
self.on_message(frame)
elif mask & selectors.EVENT_WRITE:
logger.debug(self.buffer)
self.flush()
return False
def run(self) -> None:
logger.debug('running')
try:
while not self.closed:
teardown = self.run_once()
if teardown:
break
except KeyboardInterrupt:
pass
finally:
try:
self.selector.unregister(self.sock)
self.sock.shutdown(socket.SHUT_WR)
except Exception as e:
logging.exception(
'Exception while shutdown of websocket client', exc_info=e)
self.sock.close()
logger.info('done')
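A small, hedged round-trip example of WebsocketFrame that does not touch the network; it only exercises the build and parse paths defined above.
raw = WebsocketFrame.text(b'hello')    # build an unmasked text frame
frame = WebsocketFrame()
remainder = frame.parse(raw)
assert frame.opcode == websocketOpcodes.TEXT_FRAME
assert frame.data == b'hello' and remainder == b''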
| 32.423792
| 88
| 0.560881
|
4f1321437fc6605c0a810469d2f2d7761a9ab0bd
| 4,854
|
py
|
Python
|
src/zocalo/cli/queue_drain.py
|
woutdenolf/python-zocalo
|
d35f1316bcc34c32bd6a8c62e6eab2c3c1484443
|
[
"BSD-3-Clause"
] | null | null | null |
src/zocalo/cli/queue_drain.py
|
woutdenolf/python-zocalo
|
d35f1316bcc34c32bd6a8c62e6eab2c3c1484443
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T15:55:19.000Z
|
2021-12-13T12:35:11.000Z
|
src/zocalo/cli/queue_drain.py
|
stufisher/python-zocalo
|
d35f1316bcc34c32bd6a8c62e6eab2c3c1484443
|
[
"BSD-3-Clause"
] | null | null | null |
#
# zocalo.queue_drain
# Drain one queue into another in a controlled manner
#
import argparse
import queue
import sys
import time
from datetime import datetime
import workflows.recipe.wrapper
import workflows.transport
import zocalo.configuration
def show_cluster_info(step):
try:
print("Beamline " + step["parameters"]["cluster_project"].upper())
except Exception:
pass
try:
print("Working directory " + step["parameters"]["workingdir"])
except Exception:
pass
show_additional_info = {"cluster.submission": show_cluster_info}
def run(args=None):
# Load configuration
zc = zocalo.configuration.from_file()
zc.activate()
parser = argparse.ArgumentParser(
usage="zocalo.queue_drain [options] source destination"
)
parser.add_argument("-?", action="help", help=argparse.SUPPRESS)
parser.add_argument("SOURCE", type=str, help="Source queue name")
parser.add_argument(
"DEST",
type=str,
help="Destination queue name;"
" use '.' to automatically determine destination for recipe messages",
)
parser.add_argument(
"--wait",
action="store",
dest="wait",
type=float,
default=5,
help="Wait this many seconds between deliveries",
)
parser.add_argument(
"--stop",
action="store",
dest="stop",
type=float,
default=60,
help="Stop if no message seen for this many seconds (0 = forever)",
)
zc.add_command_line_options(parser)
workflows.transport.add_command_line_options(parser, transport_argument=True)
args = parser.parse_args(args)
transport = workflows.transport.lookup(args.transport)()
transport.connect()
messages = queue.Queue()
def receive_message(header, message):
messages.put((header, message))
print(f"Reading messages from {args.SOURCE}")
subscription_id = transport.subscribe(
args.SOURCE, receive_message, acknowledgement=True
)
if args.DEST == ".":
print("Writing messages to automatically determined destinations")
else:
print(f"Writing messages to {args.DEST}")
message_count = 0
header_filter = frozenset(
{
"content-length",
"destination",
"expires",
"message-id",
"original-destination",
"originalExpiration",
"subscription",
"timestamp",
"redelivered",
}
)
drain_start = time.time()
idle_time = 0
try:
while True:
try:
header, message = messages.get(True, 0.1)
except queue.Empty:
idle_time = idle_time + 0.1
if args.stop and idle_time > args.stop:
break
continue
idle_time = 0
print()
try:
print(
"Message date: {:%Y-%m-%d %H:%M:%S}".format(
datetime.fromtimestamp(int(header["timestamp"]) / 1000)
)
)
except Exception:
pass
target_queue = args.DEST
try:
print(f"Recipe ID: {message['environment']['ID']}")
r = workflows.recipe.wrapper.RecipeWrapper(message=message)
if target_queue == ".":
target_queue = r.recipe_step["queue"]
print(f"Target Queue: {target_queue}")
additional_info_function = show_additional_info.get(target_queue)
if additional_info_function:
additional_info_function(r.recipe_step)
except Exception:
pass
if target_queue == ".":
exit("Could not determine target queue for message")
new_headers = {
key: header[key] for key in header if key not in header_filter
}
txn = transport.transaction_begin(subscription_id=subscription_id)
transport.send(target_queue, message, headers=new_headers, transaction=txn)
transport.ack(header, transaction=txn)
transport.transaction_commit(txn)
message_count = message_count + 1
print(
"%4d message(s) drained in %.1f seconds"
% (message_count, time.time() - drain_start)
)
time.sleep(args.wait)
except KeyboardInterrupt:
sys.exit(
"\nCancelling, %d message(s) drained, %d message(s) unprocessed in memory"
% (message_count, messages.qsize())
)
print(
"%d message(s) drained, no message seen for %.1f seconds"
% (message_count, idle_time)
)
transport.disconnect()
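A hedged example call to run() using the options defined above; the queue names are placeholders, and a valid zocalo configuration plus a reachable transport are assumed to be available.
run(['--wait', '5', '--stop', '60', 'source.queue', 'dest.queue'])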
| 29.962963
| 87
| 0.57396
|
68f7c5867b770849f72c0aadaf6b584916c1b033
| 1,257
|
py
|
Python
|
venv/lib/python3.7/site-packages/datalad/tests/test_installed.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/tests/test_installed.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/tests/test_installed.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test invocation of datalad utilities "as is installed"
"""
from mock import patch
from .utils import ok_startswith, eq_, assert_cwd_unchanged
from datalad.cmd import Runner
from datalad.support.exceptions import CommandError
def check_run_and_get_output(cmd):
runner = Runner()
try:
        # suppress log output in case it was set to high values
        with patch.dict('os.environ', {'DATALAD_LOG_LEVEL': 'WARN'}):
            output = runner.run(cmd.split())
    except CommandError as e:
        raise AssertionError("%r failed to start normally. "
                             "Exited with %d and output %s" % (cmd, e.code, (e.stdout, e.stderr)))
return output
@assert_cwd_unchanged
def test_run_datalad_help():
out, err = check_run_and_get_output("datalad --help")
ok_startswith(out, "Usage: ")
eq_(err, "")
| 36.970588
| 93
| 0.588703
|
cc597254aecb7628314a64896988e3697e1008b5
| 1,753
|
py
|
Python
|
retinex/utils.py
|
Mikehem/ImageEnhancement
|
6e2df5b8e3d9147e0109f72204425f0fbebe0ae0
|
[
"MIT"
] | null | null | null |
retinex/utils.py
|
Mikehem/ImageEnhancement
|
6e2df5b8e3d9147e0109f72204425f0fbebe0ae0
|
[
"MIT"
] | null | null | null |
retinex/utils.py
|
Mikehem/ImageEnhancement
|
6e2df5b8e3d9147e0109f72204425f0fbebe0ae0
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image
def data_augmentation(image, mode):
if mode == 0:
# original
return image
elif mode == 1:
# flip up and down
return np.flipud(image)
elif mode == 2:
# rotate counterwise 90 degree
return np.rot90(image)
elif mode == 3:
# rotate 90 degree and flip up and down
image = np.rot90(image)
return np.flipud(image)
elif mode == 4:
# rotate 180 degree
return np.rot90(image, k=2)
elif mode == 5:
# rotate 180 degree and flip
image = np.rot90(image, k=2)
return np.flipud(image)
elif mode == 6:
# rotate 270 degree
return np.rot90(image, k=3)
elif mode == 7:
# rotate 270 degree and flip
image = np.rot90(image, k=3)
return np.flipud(image)
def load_images(file):
im = Image.open(file)
return np.array(im, dtype="float32") / 255.0
def save_images(filepath, result_1, result_2 = None):
result_1 = np.squeeze(result_1)
result_2 = np.squeeze(result_2)
if not result_2.any():
cat_image = result_1
else:
cat_image = np.concatenate([result_1, result_2], axis = 1)
im = Image.fromarray(np.clip(cat_image * 255.0, 0, 255.0).astype('uint8'))
im.save(filepath, 'png')
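A hedged usage example of the helpers above; the file names are placeholders.
img = load_images('input.png')              # float32 array scaled to [0, 1]
flipped = data_augmentation(img, mode=1)    # flip up/down
rotated = data_augmentation(img, mode=2)    # rotate 90 degrees counter-clockwise
save_images('output.png', img, flipped)     # saves the two results side by side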
| 29.711864
| 79
| 0.585282
|
f928c3d0c1f38bfbd894e51617927851be4b6ccd
| 2,109
|
py
|
Python
|
tests/handlers/test_widgets.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 4
|
2018-07-31T09:39:33.000Z
|
2019-05-22T23:56:18.000Z
|
tests/handlers/test_widgets.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 1
|
2018-04-16T20:27:32.000Z
|
2018-04-16T20:27:32.000Z
|
tests/handlers/test_widgets.py
|
hgs847825/redash
|
11d09b2f0945fb90898b68b730d12e7a52190251
|
[
"BSD-2-Clause"
] | 1
|
2018-04-16T13:08:44.000Z
|
2018-04-16T13:08:44.000Z
|
from tests import BaseTestCase
from redash import models
class WidgetAPITest(BaseTestCase):
def create_widget(self, dashboard, visualization, width=1):
data = {
'visualization_id': visualization.id,
'dashboard_id': dashboard.id,
'options': {},
'width': width
}
rv = self.make_request('post', '/api/widgets', data=data)
return rv
def test_create_widget(self):
dashboard = self.factory.create_dashboard()
vis = self.factory.create_visualization()
rv = self.create_widget(dashboard, vis)
self.assertEquals(rv.status_code, 200)
def test_wont_create_widget_for_visualization_you_dont_have_access_to(self):
dashboard = self.factory.create_dashboard()
vis = self.factory.create_visualization()
ds = self.factory.create_data_source(group=self.factory.create_group())
vis.query_rel.data_source = ds
models.db.session.add(vis.query_rel)
data = {
'visualization_id': vis.id,
'dashboard_id': dashboard.id,
'options': {},
'width': 1
}
rv = self.make_request('post', '/api/widgets', data=data)
self.assertEqual(rv.status_code, 403)
def test_create_text_widget(self):
dashboard = self.factory.create_dashboard()
data = {
'visualization_id': None,
'text': 'Sample text.',
'dashboard_id': dashboard.id,
'options': {},
'width': 2
}
rv = self.make_request('post', '/api/widgets', data=data)
self.assertEquals(rv.status_code, 200)
self.assertEquals(rv.json['widget']['text'], 'Sample text.')
def test_delete_widget(self):
widget = self.factory.create_widget()
rv = self.make_request('delete', '/api/widgets/{0}'.format(widget.id))
self.assertEquals(rv.status_code, 200)
dashboard = models.Dashboard.get_by_slug_and_org(widget.dashboard.slug, widget.dashboard.org)
self.assertEquals(dashboard.widgets.count(), 0)
| 31.477612
| 101
| 0.618303
|
db4dbb5228562e02fd73181174b2bae679b4efcb
| 3,850
|
py
|
Python
|
profiles_api/views.py
|
leonsolon/profiles-rest-api
|
591dd921d0a207a6fed8d46618f0eb875300b932
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
leonsolon/profiles-rest-api
|
591dd921d0a207a6fed8d46618f0eb875300b932
|
[
"MIT"
] | 7
|
2020-06-06T01:44:24.000Z
|
2022-02-10T10:27:43.000Z
|
profiles_api/views.py
|
leonsolon/profiles-rest-api
|
591dd921d0a207a6fed8d46618f0eb875300b932
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from profiles_api import permissions
from profiles_api import serializers
from profiles_api import models
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a lista of APIView features - endpoints"""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
"""Create a hello message with our name"""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status = status.HTTP_400_BAD_REQUEST
            )
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT' })
def patch(self, request, pk=None):
"""Handle partial update an object"""
return Response({'method': 'PATCH' })
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE' })
"""ViewSet its different from http methods """
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello method"""
a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello', 'a_viewset': a_viewset})
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
                serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting object by id"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating object by id"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of object by id"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
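These ViewSets are normally exposed through a router in a urls.py; the sketch below is a hedged example of standard Django REST framework wiring in which the URL prefixes and basenames are assumptions, not values taken from this project.
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
router.register('profile', views.UserProfileViewSet)
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    path('', include(router.urls)),
]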
| 33.77193
| 77
| 0.652208
|
aca87a5ee811911ea4b5662f915bf679231c9032
| 14,490
|
py
|
Python
|
vat.py
|
csterling/TorchSSL
|
b7478424abfdee925da8f013391e598bfc61c720
|
[
"MIT"
] | null | null | null |
vat.py
|
csterling/TorchSSL
|
b7478424abfdee925da8f013391e598bfc61c720
|
[
"MIT"
] | null | null | null |
vat.py
|
csterling/TorchSSL
|
b7478424abfdee925da8f013391e598bfc61c720
|
[
"MIT"
] | null | null | null |
# import needed library
import os
import logging
import random
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from utils import net_builder, get_logger, count_parameters, over_write_args_from_file
from train_utils import TBLog, get_optimizer, get_cosine_schedule_with_warmup
from models.vat.vat import Vat
from datasets.ssl_dataset import SSL_Dataset
from datasets.data_utils import get_data_loader
def main(args):
'''
For (Distributed)DataParallelism,
    main(args) spawns one process (main_worker) per GPU.
'''
save_path = os.path.join(args.save_dir, args.save_name)
if os.path.exists(save_path) and args.overwrite:
import shutil
shutil.rmtree(save_path)
if os.path.exists(save_path) and not args.overwrite:
raise Exception('already existing model: {}'.format(save_path))
if args.resume:
if args.load_path is None:
raise Exception('Resume of training requires --load_path in the args')
if os.path.abspath(save_path) == os.path.abspath(args.load_path) and not args.overwrite:
            raise Exception('Saving & loading paths are the same. \
                If you want to overwrite, pass --overwrite in the arguments.')
if args.seed is not None:
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
# distributed: true if manually selected or if world_size > 1
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count() # number of gpus of each node
if args.multiprocessing_distributed:
# now, args.world_size means num of total processes in all nodes
args.world_size = ngpus_per_node * args.world_size
# args=(,) means the arguments of main_worker
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
'''
main_worker is conducted on each GPU.
'''
global best_acc1
args.gpu = gpu
    # The random seed has to be set for the synchronization of labeled data sampling in each process.
    assert args.seed is not None
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
cudnn.deterministic = True
# SET UP FOR DISTRIBUTED TRAINING
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu # compute global rank
# set distributed group:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# SET save_path and logger
save_path = os.path.join(args.save_dir, args.save_name)
logger_level = "WARNING"
tb_log = None
if args.rank % ngpus_per_node == 0:
tb_log = TBLog(save_path, 'tensorboard', use_tensorboard=args.use_tensorboard)
logger_level = "INFO"
logger = get_logger(args.save_name, save_path, logger_level)
logger.warning(f"USE GPU: {args.gpu} for training")
args.bn_momentum = 1.0 - 0.999
if 'imagenet' in args.dataset.lower():
_net_builder = net_builder('ResNet50', False, None, is_remix=False)
else:
_net_builder = net_builder(args.net,
args.net_from_name,
{'first_stride': 2 if 'stl' in args.dataset else 1,
'depth': args.depth,
'widen_factor': args.widen_factor,
'leaky_slope': args.leaky_slope,
'bn_momentum': args.bn_momentum,
'dropRate': args.dropout,
'use_embed': False,
'is_remix': False},
)
model = Vat(_net_builder,
args.num_classes,
args.vat_weight,
num_eval_iter=args.num_eval_iter,
tb_log=tb_log,
ema_m=args.ema_m,
logger=logger)
logger.info(f'Number of Trainable Params: {count_parameters(model.model)}')
# SET Optimizer & LR Scheduler
## construct SGD and cosine lr scheduler
optimizer = get_optimizer(model.model, args.optim, args.lr, args.momentum, args.weight_decay)
scheduler = get_cosine_schedule_with_warmup(optimizer,
args.num_train_iter,
num_warmup_steps=args.num_train_iter * 0)
## set SGD and cosine lr
model.set_optimizer(optimizer, scheduler)
# SET Devices for (Distributed) DataParallel
if not torch.cuda.is_available():
raise Exception('ONLY GPU TRAINING IS SUPPORTED')
elif args.distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
'''
batch_size: batch_size per node -> batch_size per gpu
workers: workers per node -> workers per gpu
'''
args.batch_size = int(args.batch_size / ngpus_per_node)
model.model.cuda(args.gpu)
model.model = nn.SyncBatchNorm.convert_sync_batchnorm(model.model)
model.model = torch.nn.parallel.DistributedDataParallel(model.model,
device_ids=[args.gpu],
broadcast_buffers=False,
find_unused_parameters=True)
else:
# if arg.gpu is None, DDP will divide and allocate batch_size
# to all available GPUs if device_ids are not set.
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.model = model.model.cuda(args.gpu)
else:
model.model = torch.nn.DataParallel(model.model).cuda()
logger.info(f"model_arch: {model}")
logger.info(f"Arguments: {args}")
cudnn.benchmark = True
if args.rank != 0:
torch.distributed.barrier()
# Construct Dataset & DataLoader
train_dset = SSL_Dataset(args, alg='vat', name=args.dataset, train=True,
num_classes=args.num_classes, data_dir=args.data_dir)
lb_dset, ulb_dset = train_dset.get_ssl_dset(args.num_labels)
_eval_dset = SSL_Dataset(args, alg='vat', name=args.dataset, train=False,
num_classes=args.num_classes, data_dir=args.data_dir)
eval_dset = _eval_dset.get_dset()
if args.rank == 0:
torch.distributed.barrier()
loader_dict = {}
dset_dict = {'train_lb': lb_dset, 'train_ulb': ulb_dset, 'eval': eval_dset}
loader_dict['train_lb'] = get_data_loader(dset_dict['train_lb'],
args.batch_size,
data_sampler=args.train_sampler,
num_iters=args.num_train_iter,
num_workers=args.num_workers,
distributed=args.distributed)
loader_dict['train_ulb'] = get_data_loader(dset_dict['train_ulb'],
args.batch_size * args.uratio,
data_sampler=args.train_sampler,
num_iters=args.num_train_iter,
num_workers=4 * args.num_workers,
distributed=args.distributed)
loader_dict['eval'] = get_data_loader(dset_dict['eval'],
args.eval_batch_size,
num_workers=args.num_workers,
drop_last=False)
## set DataLoader
model.set_data_loader(loader_dict)
# If args.resume, load checkpoints from args.load_path
if args.resume:
model.load_model(args.load_path)
# START TRAINING
trainer = model.train
for epoch in range(args.epoch):
trainer(args)
if not args.multiprocessing_distributed or \
(args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
model.save_model('latest_model.pth', save_path)
logging.warning(f"GPU {args.rank} training is FINISHED")
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='')
'''
Saving & loading of the model.
'''
parser.add_argument('--save_dir', type=str, default='./saved_models')
parser.add_argument('--save_name', type=str, default='vat')
parser.add_argument('--resume', type=str2bool, default=False)
parser.add_argument('--load_path', type=str, default=None)
parser.add_argument('--overwrite', type=str2bool, default=False)
parser.add_argument('--use_tensorboard', action='store_true', help='Use tensorboard to plot and save curves, otherwise save the curves locally.')
'''
Training Configuration of Vat
'''
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--num_train_iter', type=int, default=2 ** 20,
help='total number of training iterations')
parser.add_argument('--num_eval_iter', type=int, default=1000,
help='evaluation frequency')
parser.add_argument('--unsup_warmup_pos', type=float, default=0.4,
help='Relative position at which constraint loss warmup ends.')
parser.add_argument('--num_labels', type=int, default=4000)
parser.add_argument('--batch_size', type=int, default=64,
help='total number of batch size of labeled data')
parser.add_argument('--uratio', type=int, default=1,
                        help='the ratio of unlabeled data to labeled data in each mini-batch')
parser.add_argument('--eval_batch_size', type=int, default=1024,
help='batch size of evaluation data loader (it does not affect the accuracy)')
parser.add_argument('--ema_m', type=float, default=0.999)
parser.add_argument('--vat_weight', type=float, default=0.3)
parser.add_argument('--entmin_weight', type=float, default=0.06, help='Entropy minimization weight')
parser.add_argument('--vat_eps', type=float, default=6, help='VAT perturbation size.')
'''
Optimizer configurations
'''
parser.add_argument('--optim', type=str, default='SGD')
parser.add_argument('--lr', type=float, default=3e-2)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--amp', type=str2bool, default=False, help='use mixed precision training or not')
parser.add_argument('--clip', type=float, default=0)
'''
Backbone Net Configurations
'''
parser.add_argument('--net', type=str, default='WideResNet')
parser.add_argument('--net_from_name', type=str2bool, default=False)
parser.add_argument('--depth', type=int, default=28)
parser.add_argument('--widen_factor', type=int, default=2)
parser.add_argument('--leaky_slope', type=float, default=0.1)
parser.add_argument('--dropout', type=float, default=0.0)
'''
Data Configurations
'''
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('--dataset', type=str, default='cifar10')
parser.add_argument('--train_sampler', type=str, default='RandomSampler')
parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--num_workers', type=int, default=1)
'''
    Multi-GPU & Distributed Training
'''
## args for distributed training (from https://github.com/pytorch/examples/blob/master/imagenet/main.py)
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='**node rank** for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:10001', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=0, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', type=str2bool, default=True,
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# config file
parser.add_argument('--c', type=str, default='')
args = parser.parse_args()
over_write_args_from_file(args, args.c)
main(args)
| 42.997033
| 149
| 0.60628
|
78f33a21132f9bfe6d6d36aa79e4209e8be277be
| 1,497
|
py
|
Python
|
examples/deep-learning-notes-and-labs/13_Transfer_Learning/alexnet-feature-extraction-lab/feature_extraction.py
|
kcavagnolo/udacity_selfdrive
|
1e1e884c3f82eec476ccc7b4f4dd6b54f48d032e
|
[
"MIT"
] | null | null | null |
examples/deep-learning-notes-and-labs/13_Transfer_Learning/alexnet-feature-extraction-lab/feature_extraction.py
|
kcavagnolo/udacity_selfdrive
|
1e1e884c3f82eec476ccc7b4f4dd6b54f48d032e
|
[
"MIT"
] | null | null | null |
examples/deep-learning-notes-and-labs/13_Transfer_Learning/alexnet-feature-extraction-lab/feature_extraction.py
|
kcavagnolo/udacity_selfdrive
|
1e1e884c3f82eec476ccc7b4f4dd6b54f48d032e
|
[
"MIT"
] | null | null | null |
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy.misc import imread
from alexnet import AlexNet
sign_names = pd.read_csv('signnames.csv')
nb_classes = 43
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, 227, 227)
# NOTE: By setting `feature_extract` to `True` we return
# the second to last layer.
fc7 = AlexNet(resized, feature_extract=True)
# TODO: Define a new fully connected layer followed by a softmax activation to classify
# the traffic signs. Assign the result of the softmax activation to `probs` below.
shape = (fc7.get_shape().as_list()[-1], nb_classes) # use this shape for the weight matrix
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
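# Sketch (not part of the original lab code): in a full transfer-learning setup,
# only the new fc8 weights would be trained on top of the frozen AlexNet features,
# e.g. something like the commented-out lines below. `labels`, `loss`, and
# `train_op` are illustrative names, not defined elsewhere in this file; the code
# below only runs inference with randomly initialized fc8 weights.
#
#   labels = tf.placeholder(tf.int64, (None,))
#   loss = tf.reduce_mean(
#       tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
#   train_op = tf.train.AdamOptimizer().minimize(loss, var_list=[fc8W, fc8b])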
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
| 30.55102
| 96
| 0.707415
|
623a2b5929d66a7f41246694d41ac02f9c4f914a
| 10,319
|
py
|
Python
|
deependrequestlog/settings/default_example.py
|
alikins/django-deepend-request-log
|
4c28e9843cc73fa2c8d3354700be066f0fe388e2
|
[
"MIT"
] | null | null | null |
deependrequestlog/settings/default_example.py
|
alikins/django-deepend-request-log
|
4c28e9843cc73fa2c8d3354700be066f0fe388e2
|
[
"MIT"
] | null | null | null |
deependrequestlog/settings/default_example.py
|
alikins/django-deepend-request-log
|
4c28e9843cc73fa2c8d3354700be066f0fe388e2
|
[
"MIT"
] | null | null | null |
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
# Django settings for galaxy project.
import os
import djcelery
djcelery.setup_loader()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
# =========================================================
# Django Core Settings
# =========================================================
DEBUG = False
ALLOWED_HOSTS = []
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
SITE_ID = 1
# Application definition
# ---------------------------------------------------------
PROMETHEUS_EXPORT_MIGRATIONS = False
# TODO(cutwater): Review 3rd party apps usage
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
# Allauth apps
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
    # 3rd party apps
'bootstrapform',
'django_prometheus',
'djcelery',
'rest_framework',
'rest_framework.authtoken',
# Project apps
'galaxy.accounts',
'galaxy.main',
)
# NOTE(cutwater): MIDDLEWARE_CLASSES is deprecated since Django 1.10;
# the MIDDLEWARE setting is used here instead.
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'log_request_id.middleware.RequestIDMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'galaxy.middleware.log_request.LogRequestMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'galaxy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'galaxy', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'galaxy.wsgi.application'
# Authentication
# ---------------------------------------------------------
AUTHENTICATION_BACKENDS = (
# Required for login by username in Django admin
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_USER_MODEL = 'accounts.CustomUser'
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/home'
# Sessions
# ---------------------------------------------------------
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
SESSION_SAVE_EVERY_REQUEST = True
# Security
# ---------------------------------------------------------
# SECURITY WARNING: Use unique key in production and keep it secret!
SECRET_KEY = '+^b03*zldz4fd!p%asz+(8u8b-0#6uw4eaex0xf$3w-km%)&2y'
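# Illustrative override (assumption, not part of the original settings): in
# production the key would typically be read from the environment instead of
# being hard-coded, e.g.
#   SECRET_KEY = os.environ['GALAXY_SECRET_KEY']  # hypothetical variable name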
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Internationalization
# ---------------------------------------------------------
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files
# ---------------------------------------------------------
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static')
# Database
# ---------------------------------------------------------
DATABASES = {}
# =========================================================
# Third Party Apps Settings
# =========================================================
# Rest Framework
# ---------------------------------------------------------
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'galaxy.api.permissions.ModelAccessPermission',
),
# TODO(cutwater): Update production settings
'DEFAULT_PAGINATION_CLASS':
'galaxy.api.pagination.PageNumberPagination',
'DEFAULT_FILTER_BACKENDS': (
'galaxy.api.filters.ActiveOnlyBackend',
'galaxy.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',
'galaxy.api.filters.OrderByBackend',
),
}
# Celery
# ---------------------------------------------------------
BROKER_URL = None
CELERY_IMPORTS = (
'galaxy.main.celerytasks',
'galaxy.worker.tasks',
)
CELERY_TRACK_STARTED = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
# Allauth
# ---------------------------------------------------------
ACCOUNT_ADAPTER = 'galaxy.main.auth.AccountAdapter'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'http'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/accounts/profile/'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/accounts/landing'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_EMAIL_SUBJECT_PREFIX = 'Ansible Galaxy '
ACCOUNT_LOGIN_ON_VALIDATE = True
ACCOUNT_USERNAME_MIN_LENGTH = 3
ACCOUNT_EMAIL_CONFIRMATION_HMAC = False
ACCOUNT_USERNAME_BLACKLIST = (
'admin',
'administrator',
'galaxy_admin',
'accounts',
'account',
'explore',
'intro',
'list',
'detail',
'roleadd',
'imports',
'authors',
)
ACCOUNT_PASSWORD_MIN_LENGTH = 8
SOCIALACCOUNT_PROVIDERS = {
'twitter': {
'SCOPE': ['r_emailaddress']
},
'github': {
'SCOPE': ['user:email', 'public_repo', 'read:org']
},
}
SOCIALACCOUNT_AVATAR_SUPPORT = True
# =========================================================
# Galaxy Settings
# =========================================================
# TODO(cutwater): Parameter descriptions required
GITHUB_TASK_USERS = []
GITHUB_SERVER = 'https://api.github.com'
GALAXY_COMMENTS_THRESHOLD = 10.0
SITE_ENV = 'PROD'
SITE_NAME = 'localhost'
# TODO(cutwater): Consider removing wait_for from settings
WAIT_FOR = []
ADMIN_URL_PATH = 'admin'
ADMIN_URL_PATTERN = r'^{}/'.format(ADMIN_URL_PATH)
ROLE_TYPES_ENABLED = frozenset(['ANS', 'CON', 'APP'])
# A base directory used by repository import task to clone repositories into.
# If set to `None`, system temporary directory is used.
CONTENT_DOWNLOAD_DIR = None
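# Illustrative override (assumption, not part of the original settings):
#   CONTENT_DOWNLOAD_DIR = '/var/tmp/galaxy-imports'  # hypothetical path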
# =========================================================
# InfluxDB Settings
# =========================================================
INFLUX_DB_HOST = 'influxdb'
INFLUX_DB_PORT = 8086
INFLUX_DB_USERNAME = 'admin'
INFLUX_DB_PASSWORD = 'admin'
INFLUX_DB_UI_EVENTS_DB_NAME = 'galaxy_metrics'
# Number of data points to buffer before galaxy writes them to influx.
# Higher numbers mean more efficient influx inserts, but they also mean that
# more data will potentially be lost when galaxy restarts.
INFLUX_INSERT_BUFFER_COUNT = 1
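# Illustrative trade-off (assumed value, not part of the original settings): a
# buffer of 100 would issue one InfluxDB write per 100 UI events, reducing write
# overhead at the cost of losing up to 99 buffered points on restart, e.g.
#   INFLUX_INSERT_BUFFER_COUNT = 100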
GALAXY_METRICS_ENABLED = True
# =========================================================
# Domain Settings
# =========================================================
GALAXY_URL = 'http://{site}:8000'
# =========================================================
# Notification Settings
# =========================================================
GALAXY_NOTIFICATION_EMAIL = 'notifications@galaxy.ansible.com'
DEFAULT_FROM_EMAIL = 'noreply@galaxy.ansible.com'
# =========================================================
# Logging Settings
# =========================================================
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'json': {
'()': 'jog.JogFormatter',
'format': ('%(asctime)s %(request_id)s %(levelname)s '
'%(name)s: %(message)s'),
},
'verbose': {
'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'import_task': {
'level': 'DEBUG',
'class': 'galaxy.common.logutils.ImportTaskHandler',
'formatter': 'simple',
}
},
'loggers': {
# Django loggers
'django': {
'level': 'INFO',
'handlers': ['console'],
},
# Galaxy logger
'galaxy': {
'level': 'DEBUG',
'handlers': ['console'],
            # NOTE(cutwater): Setting propagate=False prevents galaxy logs
            # from being handled by the root logger, which is customized by
            # celery.
'propagate': False,
},
# A special logger, that sends task logs to the database
'galaxy.worker.tasks.import_repository': {
'level': 'INFO',
'handlers': ['import_task'],
'propagate': False,
},
}
}
| 25.992443
| 77
| 0.577769
|
0c945ce4dbee37ed57ace0e11dd8b801a167b890
| 1,436
|
py
|
Python
|
setup.py
|
byrnereese/mkdocs-swagger-ui
|
5c8bddfb37dbe153ed42f7eaf56c85abd29f165f
|
[
"MIT"
] | 3
|
2021-07-31T23:48:30.000Z
|
2022-02-08T17:03:10.000Z
|
setup.py
|
byrnereese/mkdocs-swagger-ui
|
5c8bddfb37dbe153ed42f7eaf56c85abd29f165f
|
[
"MIT"
] | 1
|
2021-07-31T23:57:41.000Z
|
2021-07-31T23:57:41.000Z
|
setup.py
|
byrnereese/mkdocs-swagger-ui
|
5c8bddfb37dbe153ed42f7eaf56c85abd29f165f
|
[
"MIT"
] | 1
|
2021-10-07T10:14:17.000Z
|
2021-10-07T10:14:17.000Z
|
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='mkdocs-swagger-ui',
version='0.1.0',
description='An MkDocs plugin to generate a markdown file containing an API reference built using Swagger UI from a base OAS3 specification.',
long_description=long_description,
long_description_content_type="text/markdown",
keywords='mkdocs swagger api documentation',
url='https://github.com/byrnereese/mkdocs-swagger-ui',
author='Byrne Reese',
author_email='byrne@majordojo.com',
license='MIT',
python_requires='>=3.0',
install_requires=[
'pyyaml',
'jinja2',
'markdown',
'mkdocs>=1.0.4'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(),
entry_points={
'mkdocs.plugins': [
'swagger-ui = mkdocs_swagger_ui_plugin.plugin:SwaggerUIPlugin'
]
}
)
| 33.395349
| 146
| 0.626741
|
551fa54842a183456346405b185bf060260e5a58
| 18,357
|
py
|
Python
|
tests/unit/lib/sync/flows/test_layer_sync_flow.py
|
aubelsb2/aws-sam-cli
|
13e80fef8c9e53ec35a500ab0ae0ab873e22a56d
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tests/unit/lib/sync/flows/test_layer_sync_flow.py
|
aubelsb2/aws-sam-cli
|
13e80fef8c9e53ec35a500ab0ae0ab873e22a56d
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tests/unit/lib/sync/flows/test_layer_sync_flow.py
|
aubelsb2/aws-sam-cli
|
13e80fef8c9e53ec35a500ab0ae0ab873e22a56d
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-03-18T18:16:23.000Z
|
2020-03-18T18:16:23.000Z
|
import base64
import hashlib
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch, call, ANY, mock_open, PropertyMock
from parameterized import parameterized
from samcli.lib.sync.exceptions import MissingPhysicalResourceError, NoLayerVersionsFoundError
from samcli.lib.sync.flows.layer_sync_flow import LayerSyncFlow, FunctionLayerReferenceSync
from samcli.lib.sync.sync_flow import SyncFlow
class TestLayerSyncFlow(TestCase):
def setUp(self):
self.layer_identifier = "LayerA"
self.build_context_mock = Mock()
self.deploy_context_mock = Mock()
self.layer_sync_flow = LayerSyncFlow(
self.layer_identifier,
self.build_context_mock,
self.deploy_context_mock,
{self.layer_identifier: "layer_version_arn"},
[],
)
def test_setup(self):
with patch.object(self.layer_sync_flow, "_session") as patched_session:
with patch.object(SyncFlow, "set_up") as patched_super_setup:
self.layer_sync_flow.set_up()
patched_super_setup.assert_called_once()
patched_session.assert_has_calls(
[
call.client("lambda"),
]
)
@patch("samcli.lib.sync.flows.layer_sync_flow.get_resource_by_id")
def test_setup_with_serverless_layer(self, get_resource_by_id_mock):
given_layer_name_with_hashes = f"{self.layer_identifier}abcdefghij"
self.layer_sync_flow._physical_id_mapping = {given_layer_name_with_hashes: "layer_version_arn"}
get_resource_by_id_mock.return_value = False
with patch.object(self.layer_sync_flow, "_session") as patched_session:
with patch.object(SyncFlow, "set_up") as patched_super_setup:
self.layer_sync_flow.set_up()
patched_super_setup.assert_called_once()
patched_session.assert_has_calls(
[
call.client("lambda"),
]
)
self.assertEqual(self.layer_sync_flow._layer_arn, "layer_version_arn")
def test_setup_with_unknown_layer(self):
given_layer_name_with_hashes = f"SomeOtherLayerabcdefghij"
self.layer_sync_flow._physical_id_mapping = {given_layer_name_with_hashes: "layer_version_arn"}
with patch.object(self.layer_sync_flow, "_session") as _:
with patch.object(SyncFlow, "set_up") as _:
with self.assertRaises(MissingPhysicalResourceError):
self.layer_sync_flow.set_up()
@patch("samcli.lib.sync.flows.layer_sync_flow.ApplicationBuilder")
@patch("samcli.lib.sync.flows.layer_sync_flow.tempfile")
@patch("samcli.lib.sync.flows.layer_sync_flow.make_zip")
@patch("samcli.lib.sync.flows.layer_sync_flow.file_checksum")
@patch("samcli.lib.sync.flows.layer_sync_flow.os")
def test_setup_gather_resources(
self, patched_os, patched_file_checksum, patched_make_zip, patched_tempfile, patched_app_builder
):
given_collect_build_resources = Mock()
self.build_context_mock.collect_build_resources.return_value = given_collect_build_resources
given_app_builder = Mock()
given_artifact_folder = Mock()
given_app_builder.build().artifacts.get.return_value = given_artifact_folder
patched_app_builder.return_value = given_app_builder
given_zip_location = Mock()
patched_make_zip.return_value = given_zip_location
given_file_checksum = Mock()
patched_file_checksum.return_value = given_file_checksum
self.layer_sync_flow._get_lock_chain = MagicMock()
self.layer_sync_flow.gather_resources()
self.build_context_mock.collect_build_resources.assert_called_with(self.layer_identifier)
patched_app_builder.assert_called_with(
given_collect_build_resources,
self.build_context_mock.build_dir,
self.build_context_mock.base_dir,
self.build_context_mock.cache_dir,
cached=True,
is_building_specific_resource=True,
manifest_path_override=self.build_context_mock.manifest_path_override,
container_manager=self.build_context_mock.container_manager,
mode=self.build_context_mock.mode,
)
patched_tempfile.gettempdir.assert_called_once()
patched_os.path.join.assert_called_with(ANY, ANY)
patched_make_zip.assert_called_with(ANY, self.layer_sync_flow._artifact_folder)
patched_file_checksum.assert_called_with(ANY, ANY)
self.assertEqual(self.layer_sync_flow._artifact_folder, given_artifact_folder)
self.assertEqual(self.layer_sync_flow._zip_file, given_zip_location)
self.assertEqual(self.layer_sync_flow._local_sha, given_file_checksum)
self.layer_sync_flow._get_lock_chain.assert_called_once()
self.layer_sync_flow._get_lock_chain.return_value.__enter__.assert_called_once()
self.layer_sync_flow._get_lock_chain.return_value.__exit__.assert_called_once()
def test_compare_remote(self):
given_lambda_client = Mock()
self.layer_sync_flow._lambda_client = given_lambda_client
given_sha256 = base64.b64encode(b"checksum")
given_layer_info = {"Content": {"CodeSha256": given_sha256}}
given_lambda_client.get_layer_version.return_value = given_layer_info
self.layer_sync_flow._local_sha = base64.b64decode(given_sha256).hex()
with patch.object(self.layer_sync_flow, "_get_latest_layer_version") as patched_get_latest_layer_version:
given_layer_name = Mock()
given_latest_layer_version = Mock()
self.layer_sync_flow._layer_arn = given_layer_name
patched_get_latest_layer_version.return_value = given_latest_layer_version
compare_result = self.layer_sync_flow.compare_remote()
self.assertTrue(compare_result)
def test_sync(self):
with patch.object(self.layer_sync_flow, "_publish_new_layer_version") as patched_publish_new_layer_version:
with patch.object(self.layer_sync_flow, "_delete_old_layer_version") as patched_delete_old_layer_version:
given_layer_version = Mock()
patched_publish_new_layer_version.return_value = given_layer_version
self.layer_sync_flow.sync()
self.assertEqual(self.layer_sync_flow._new_layer_version, given_layer_version)
patched_publish_new_layer_version.assert_called_once()
patched_delete_old_layer_version.assert_called_once()
def test_publish_new_layer_version(self):
given_layer_name = Mock()
given_lambda_client = Mock()
self.layer_sync_flow._lambda_client = given_lambda_client
given_zip_file = Mock()
self.layer_sync_flow._zip_file = given_zip_file
self.layer_sync_flow._layer_arn = given_layer_name
with patch.object(self.layer_sync_flow, "_get_resource") as patched_get_resource:
with patch("builtins.open", mock_open(read_data="data")) as mock_file:
given_publish_layer_result = {"Version": 24}
given_lambda_client.publish_layer_version.return_value = given_publish_layer_result
given_layer_resource = Mock()
patched_get_resource.return_value = given_layer_resource
result_version = self.layer_sync_flow._publish_new_layer_version()
patched_get_resource.assert_called_with(self.layer_identifier)
given_lambda_client.publish_layer_version.assert_called_with(
LayerName=given_layer_name,
Content={"ZipFile": "data"},
CompatibleRuntimes=given_layer_resource.get("Properties", {}).get("CompatibleRuntimes", []),
)
self.assertEqual(result_version, given_publish_layer_result.get("Version"))
def test_delete_old_layer_version(self):
given_layer_name = Mock()
given_layer_version = Mock()
given_lambda_client = Mock()
self.layer_sync_flow._lambda_client = given_lambda_client
self.layer_sync_flow._layer_arn = given_layer_name
self.layer_sync_flow._old_layer_version = given_layer_version
self.layer_sync_flow._delete_old_layer_version()
given_lambda_client.delete_layer_version.assert_called_with(
LayerName=given_layer_name, VersionNumber=given_layer_version
)
@patch("samcli.lib.sync.flows.layer_sync_flow.os")
@patch("samcli.lib.sync.flows.layer_sync_flow.SamFunctionProvider")
@patch("samcli.lib.sync.flows.layer_sync_flow.FunctionLayerReferenceSync")
def test_gather_dependencies(self, patched_function_ref_sync, patched_function_provider, os_mock):
self.layer_sync_flow._new_layer_version = "given_new_layer_version_arn"
given_function_provider = Mock()
patched_function_provider.return_value = given_function_provider
mock_some_random_layer = PropertyMock()
mock_some_random_layer.full_path = "SomeRandomLayer"
mock_given_layer = PropertyMock()
mock_given_layer.full_path = self.layer_identifier
mock_some_nested_layer = PropertyMock()
mock_some_nested_layer.full_path = "NestedStack1/" + self.layer_identifier
mock_function_a = PropertyMock(layers=[mock_some_random_layer])
mock_function_a.full_path = "FunctionA"
mock_function_b = PropertyMock(layers=[mock_given_layer])
mock_function_b.full_path = "FunctionB"
mock_function_c = PropertyMock(layers=[mock_some_nested_layer])
mock_function_c.full_path = "NestedStack1/FunctionC"
given_layers = [
mock_function_a,
mock_function_b,
mock_function_c,
]
given_function_provider.get_all.return_value = given_layers
self.layer_sync_flow._stacks = Mock()
given_layer_physical_name = Mock()
self.layer_sync_flow._layer_arn = given_layer_physical_name
self.layer_sync_flow._zip_file = Mock()
dependencies = self.layer_sync_flow.gather_dependencies()
patched_function_ref_sync.assert_called_once_with(
"FunctionB",
given_layer_physical_name,
self.layer_sync_flow._new_layer_version,
self.layer_sync_flow._build_context,
self.layer_sync_flow._deploy_context,
self.layer_sync_flow._physical_id_mapping,
self.layer_sync_flow._stacks,
)
self.assertEqual(len(dependencies), 1)
@patch("samcli.lib.sync.flows.layer_sync_flow.os")
@patch("samcli.lib.sync.flows.layer_sync_flow.SamFunctionProvider")
@patch("samcli.lib.sync.flows.layer_sync_flow.FunctionLayerReferenceSync")
def test_gather_dependencies_nested_stack(self, patched_function_ref_sync, patched_function_provider, os_mock):
self.layer_identifier = "NestedStack1/Layer1"
self.layer_sync_flow._layer_identifier = "NestedStack1/Layer1"
self.layer_sync_flow._new_layer_version = "given_new_layer_version_arn"
given_function_provider = Mock()
patched_function_provider.return_value = given_function_provider
mock_some_random_layer = PropertyMock()
mock_some_random_layer.full_path = "Layer1"
mock_given_layer = PropertyMock()
mock_given_layer.full_path = self.layer_identifier
mock_some_nested_layer = PropertyMock()
mock_some_nested_layer.full_path = "NestedStack1/Layer2"
mock_function_a = PropertyMock(layers=[mock_some_random_layer])
mock_function_a.full_path = "FunctionA"
mock_function_b = PropertyMock(layers=[mock_given_layer])
mock_function_b.full_path = "NestedStack1/FunctionB"
mock_function_c = PropertyMock(layers=[mock_some_nested_layer])
mock_function_c.full_path = "NestedStack1/FunctionC"
given_layers = [
mock_function_a,
mock_function_b,
mock_function_c,
]
given_function_provider.get_all.return_value = given_layers
self.layer_sync_flow._stacks = Mock()
given_layer_physical_name = Mock()
self.layer_sync_flow._layer_arn = given_layer_physical_name
self.layer_sync_flow._zip_file = Mock()
dependencies = self.layer_sync_flow.gather_dependencies()
patched_function_ref_sync.assert_called_once_with(
"NestedStack1/FunctionB",
given_layer_physical_name,
self.layer_sync_flow._new_layer_version,
self.layer_sync_flow._build_context,
self.layer_sync_flow._deploy_context,
self.layer_sync_flow._physical_id_mapping,
self.layer_sync_flow._stacks,
)
self.assertEqual(len(dependencies), 1)
def test_get_latest_layer_version(self):
given_version = Mock()
given_layer_name = Mock()
given_lambda_client = Mock()
given_lambda_client.list_layer_versions.return_value = {"LayerVersions": [{"Version": given_version}]}
self.layer_sync_flow._lambda_client = given_lambda_client
self.layer_sync_flow._layer_arn = given_layer_name
latest_layer_version = self.layer_sync_flow._get_latest_layer_version()
given_lambda_client.list_layer_versions.assert_called_with(LayerName=given_layer_name)
self.assertEqual(latest_layer_version, given_version)
def test_get_latest_layer_version_error(self):
given_layer_name = Mock()
given_lambda_client = Mock()
given_lambda_client.list_layer_versions.return_value = {"LayerVersions": []}
self.layer_sync_flow._lambda_client = given_lambda_client
self.layer_sync_flow._layer_arn = given_layer_name
with self.assertRaises(NoLayerVersionsFoundError):
self.layer_sync_flow._get_latest_layer_version()
def test_equality_keys(self):
self.assertEqual(self.layer_sync_flow._equality_keys(), self.layer_identifier)
@patch("samcli.lib.sync.flows.layer_sync_flow.ResourceAPICall")
def test_get_resource_api_calls(self, resource_api_call_mock):
result = self.layer_sync_flow._get_resource_api_calls()
self.assertEqual(len(result), 1)
resource_api_call_mock.assert_called_once_with(self.layer_identifier, ["Build"])
class TestFunctionLayerReferenceSync(TestCase):
def setUp(self):
self.function_identifier = "function"
self.layer_name = "Layer1"
self.old_layer_version = 1
self.new_layer_version = 2
self.function_layer_sync = FunctionLayerReferenceSync(
self.function_identifier, self.layer_name, self.new_layer_version, Mock(), Mock(), {}, []
)
def test_setup(self):
with patch.object(self.function_layer_sync, "_session") as patched_session:
with patch.object(SyncFlow, "set_up") as patched_super_setup:
self.function_layer_sync.set_up()
patched_super_setup.assert_called_once()
patched_session.assert_has_calls(
[
call.client("lambda"),
]
)
def test_sync(self):
given_lambda_client = Mock()
self.function_layer_sync._lambda_client = given_lambda_client
other_layer_version_arn = "SomeOtherLayerVersionArn"
given_function_result = {"Configuration": {"Layers": [{"Arn": "Layer1:1"}, {"Arn": other_layer_version_arn}]}}
given_lambda_client.get_function.return_value = given_function_result
with patch.object(self.function_layer_sync, "get_physical_id") as patched_get_physical_id:
with patch.object(self.function_layer_sync, "_locks") as patched_locks:
given_physical_id = Mock()
patched_get_physical_id.return_value = given_physical_id
self.function_layer_sync.sync()
patched_get_physical_id.assert_called_with(self.function_identifier)
patched_locks.get.assert_called_with(
SyncFlow._get_lock_key(
self.function_identifier, FunctionLayerReferenceSync.UPDATE_FUNCTION_CONFIGURATION
)
)
given_lambda_client.get_function.assert_called_with(FunctionName=given_physical_id)
given_lambda_client.update_function_configuration.assert_called_with(
FunctionName=given_physical_id, Layers=[other_layer_version_arn, "Layer1:2"]
)
def test_sync_with_existing_new_layer_version_arn(self):
given_lambda_client = Mock()
self.function_layer_sync._lambda_client = given_lambda_client
given_function_result = {"Configuration": {"Layers": [{"Arn": "Layer1:2"}]}}
given_lambda_client.get_function.return_value = given_function_result
with patch.object(self.function_layer_sync, "get_physical_id") as patched_get_physical_id:
with patch.object(self.function_layer_sync, "_locks") as patched_locks:
given_physical_id = Mock()
patched_get_physical_id.return_value = given_physical_id
self.function_layer_sync.sync()
patched_locks.get.assert_called_with(
SyncFlow._get_lock_key(
self.function_identifier, FunctionLayerReferenceSync.UPDATE_FUNCTION_CONFIGURATION
)
)
patched_get_physical_id.assert_called_with(self.function_identifier)
given_lambda_client.get_function.assert_called_with(FunctionName=given_physical_id)
given_lambda_client.update_function_configuration.assert_not_called()
def test_equality_keys(self):
self.assertEqual(
self.function_layer_sync._equality_keys(),
(self.function_identifier, self.layer_name, self.new_layer_version),
)
def test_compare_remote(self):
self.assertFalse(self.function_layer_sync.compare_remote())
def test_gather_dependencies(self):
self.assertEqual(self.function_layer_sync.gather_dependencies(), [])
| 42.591647
| 118
| 0.703274
|