Dataset schema (one row per source file; "nullable" marks the columns flagged with ⌀ in the source header):

| column | dtype | observed values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each record below lists its metadata fields, followed by its content field and the derived per-file statistics.
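The columns above match the layout of a Hugging Face-style source-code dataset, with one row per file and separate max_stars/max_issues/max_forks repository snapshots. As a minimal sketch of how such rows could be iterated, assuming the table is published as a `datasets`-compatible dataset (the dataset path below is a placeholder, not taken from this page):

```python
from datasets import load_dataset

# "org/code-dataset" is a placeholder path; substitute the real dataset name.
# streaming=True avoids downloading the whole dataset before iterating.
rows = load_dataset("org/code-dataset", split="train", streaming=True)

for row in rows:
    # Each row is a dict keyed by the schema columns listed above.
    stars = row["max_stars_count"]  # may be None (nullable column)
    if stars is not None and stars >= 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], stars)
```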

hexsha: c77d4281c9775095f10601d396236916acd5699c | size: 299 | ext: py | lang: Python
max_stars: Compiler/GC/program.py @ lemonviv/Pivot-SPDZ (head f3db87d8849e5f9fa39f321d85feec83107ee405), licenses ["BSD-2-Clause"], count 429, events 2019-06-13T14:11:24.000Z to 2022-03-30T18:54:31.000Z
max_issues: Compiler/GC/program.py @ lemonviv/Pivot-SPDZ (head f3db87d8849e5f9fa39f321d85feec83107ee405), licenses ["BSD-2-Clause"], count 471, events 2019-06-13T09:28:49.000Z to 2022-03-31T17:46:03.000Z
max_forks: Compiler/GC/program.py @ lemonviv/Pivot-SPDZ (head f3db87d8849e5f9fa39f321d85feec83107ee405), licenses ["BSD-2-Clause"], count 162, events 2019-08-07T09:22:11.000Z to 2022-03-31T11:24:26.000Z
content:
from Compiler import types, instructions

class Program(object):
    def __init__(self, progname):
        types.program = self
        instructions.program = self
        self.curr_tape = None
        exec(compile(open(progname).read(), progname, 'exec'))

    def malloc(self, *args):
        pass
avg_line_length: 27.181818 | max_line_length: 62 | alphanum_fraction: 0.64214
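The last three fields of each record (avg_line_length, max_line_length, alphanum_fraction) are derived from the content field. A minimal sketch of how such statistics are typically computed follows; these are assumed definitions, not taken from the dataset's actual preprocessing code:

```python
def content_stats(content: str) -> dict:
    """Per-file statistics over the raw text (assumed definitions)."""
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines]
    alphanumeric = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(lines) if lines else 0.0,
        "max_line_length": max(line_lengths, default=0),
        "alphanum_fraction": alphanumeric / len(content) if content else 0.0,
    }
```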

hexsha: 4da46f7ecef048a43708de434965a3f14f57058a | size: 269 | ext: py | lang: Python
max_stars: consultas/consultas/doctype/indices_inmunologicos/indices_inmunologicos.py @ Lewinta/Consultas (head e01ad870a2bad0eb5938d8800e3e2934402fce62), licenses ["MIT"], count null, events null
max_issues: consultas/consultas/doctype/indices_inmunologicos/indices_inmunologicos.py @ Lewinta/Consultas (head e01ad870a2bad0eb5938d8800e3e2934402fce62), licenses ["MIT"], count null, events null
max_forks: consultas/consultas/doctype/indices_inmunologicos/indices_inmunologicos.py @ Lewinta/Consultas (head e01ad870a2bad0eb5938d8800e3e2934402fce62), licenses ["MIT"], count null, events null
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Lewin Villar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class IndicesInmunologicos(Document):
    pass
avg_line_length: 24.454545 | max_line_length: 51 | alphanum_fraction: 0.788104

hexsha: 09c7c44e0ef4499d5ead7cf23d6fb7e4d3725f96 | size: 427 | ext: py | lang: Python
max_stars: src/tt_logic/tt_logic/politic_power/formulas.py @ Alacrate/the-tale (head 43b211f3a99e93964e95abc20a8ed649a205ffcf), licenses ["BSD-3-Clause"], count 85, events 2017-11-21T12:22:02.000Z to 2022-03-27T23:07:17.000Z
max_issues: src/tt_logic/tt_logic/politic_power/formulas.py @ Alacrate/the-tale (head 43b211f3a99e93964e95abc20a8ed649a205ffcf), licenses ["BSD-3-Clause"], count 545, events 2017-11-04T14:15:04.000Z to 2022-03-27T14:19:27.000Z
max_forks: src/tt_logic/tt_logic/politic_power/formulas.py @ Alacrate/the-tale (head 43b211f3a99e93964e95abc20a8ed649a205ffcf), licenses ["BSD-3-Clause"], count 45, events 2017-11-11T12:36:30.000Z to 2022-02-25T06:10:44.000Z
content:
import math

from . import constants as c


def base_quest_power(quest_rung: int) -> int:
    return int(math.ceil(c.POWER_PER_QUEST * (1 + math.log(quest_rung, 2))))


def might_to_power(might: float) -> float:
    if might < 1:
        return 0
    return (math.log(might, 10) / 3)


def power_modifier_from_freedom(freedom: float) -> float:
    return freedom / c.EXPECTED_PLACE_FREEDOM_MAXIMUM * c.MODIFIER_PLACE_FREEDOM
avg_line_length: 21.35 | max_line_length: 80 | alphanum_fraction: 0.704918

hexsha: 2ca84fdb3018a582d50c2f7e629c75366083e5de | size: 9,101 | ext: py | lang: Python
max_stars: cpdb/trr/tests/views/test_trr_viewset.py @ invinst/CPDBv2_backend (head b4e96d620ff7a437500f525f7e911651e4a18ef9), licenses ["Apache-2.0"], count 25, events 2018-07-20T22:31:40.000Z to 2021-07-15T16:58:41.000Z
max_issues: cpdb/trr/tests/views/test_trr_viewset.py @ invinst/CPDBv2_backend (head b4e96d620ff7a437500f525f7e911651e4a18ef9), licenses ["Apache-2.0"], count 13, events 2018-06-18T23:08:47.000Z to 2022-02-10T07:38:25.000Z
max_forks: cpdb/trr/tests/views/test_trr_viewset.py @ invinst/CPDBv2_backend (head b4e96d620ff7a437500f525f7e911651e4a18ef9), licenses ["Apache-2.0"], count 6, events 2018-05-17T21:59:43.000Z to 2020-11-17T00:30:26.000Z
content:
from datetime import datetime, date
from django.urls import reverse
from django.contrib.gis.geos import Point
from rest_framework.test import APITestCase
from rest_framework import status
from robber import expect
import pytz
from data.factories import PoliceUnitFactory, OfficerFactory, OfficerHistoryFactory, OfficerAllegationFactory
from email_service.constants import TRR_ATTACHMENT_REQUEST
from email_service.factories import EmailTemplateFactory
from trr.factories import TRRFactory, ActionResponseFactory
from trr.tests.mixins import TRRTestCaseMixin
class TRRViewSetTestCase(TRRTestCaseMixin, APITestCase):
def test_retrieve(self):
unit = PoliceUnitFactory(unit_name='001', description='Unit 001')
officer = OfficerFactory(
first_name='Vinh',
last_name='Vu',
rank='Detective',
race='White',
gender='M',
appointed_date=date(2000, 1, 1),
birth_year=1980,
complaint_percentile=44.4444,
civilian_allegation_percentile=11.1111,
internal_allegation_percentile=22.2222,
trr_percentile=33.3333,
last_unit=unit
)
OfficerHistoryFactory(officer=officer, unit=unit)
trr = TRRFactory(
taser=False,
firearm_used=False,
officer_assigned_beat='Beat 1',
officer_in_uniform=True,
officer_on_duty=False,
trr_datetime=datetime(2001, 1, 1, tzinfo=pytz.utc),
subject_gender='M',
subject_age=37,
officer=officer,
location_recode='Factory',
block='34XX',
street='Douglas Blvd',
beat=1021,
point=Point(1.0, 1.0)
)
OfficerAllegationFactory(
officer=officer,
allegation__incident_date=datetime(2003, 1, 1, tzinfo=pytz.utc),
start_date=date(2004, 1, 1),
end_date=date(2005, 1, 1),
final_finding='SU'
)
ActionResponseFactory(trr=trr, force_type='Verbal Commands', action_sub_category='1')
self.refresh_index()
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': trr.id,
'officer_assigned_beat': 'Beat 1',
'officer_in_uniform': True,
'officer_on_duty': False,
'officer': {
'id': officer.id,
'rank': 'Detective',
'gender': 'Male',
'race': 'White',
'full_name': 'Vinh Vu',
'appointed_date': '2000-01-01',
'unit': {'unit_name': '001', 'description': 'Unit 001'},
'birth_year': 1980,
'percentile_trr': '33.3333',
'percentile_allegation_internal': '22.2222',
'percentile_allegation_civilian': '11.1111',
'percentile_allegation': '44.4444',
},
'subject_race': 'White',
'subject_gender': 'Male',
'subject_age': 37,
'force_category': 'Other',
'force_types': ['Verbal Commands'],
'date_of_incident': '2001-01-01',
'location_type': 'Factory',
'address': '34XX Douglas Blvd',
'beat': 1021,
'point': {
'lng': 1.0,
'lat': 1.0,
},
})
def test_retrieve_no_point(self):
unit = PoliceUnitFactory(unit_name='001', description='Unit 001')
officer = OfficerFactory(
first_name='Vinh',
last_name='Vu',
race='White',
gender='M',
rank='Detective',
appointed_date=date(2000, 1, 1),
birth_year=1980,
complaint_percentile=44.4444,
civilian_allegation_percentile=11.1111,
internal_allegation_percentile=22.2222,
trr_percentile=33.3333,
last_unit=unit
)
OfficerHistoryFactory(officer=officer, unit=unit)
trr = TRRFactory(
taser=False,
firearm_used=False,
officer_assigned_beat='Beat 1',
officer_in_uniform=True,
officer_on_duty=False,
trr_datetime=datetime(2001, 1, 1, tzinfo=pytz.utc),
subject_gender='M',
subject_age=37,
officer=officer,
location_recode='Factory',
block='34XX',
street='Douglas Blvd',
beat=1021,
)
OfficerAllegationFactory(
officer=officer,
allegation__incident_date=datetime(2003, 1, 1, tzinfo=pytz.utc),
start_date=date(2004, 1, 1),
end_date=date(2005, 1, 1), final_finding='SU')
ActionResponseFactory(trr=trr, force_type='Verbal Commands', action_sub_category=1)
self.refresh_index()
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': trr.id,
'officer_assigned_beat': 'Beat 1',
'officer_in_uniform': True,
'officer_on_duty': False,
'officer': {
'id': officer.id,
'rank': 'Detective',
'gender': 'Male',
'race': 'White',
'full_name': 'Vinh Vu',
'appointed_date': '2000-01-01',
'unit': {'unit_name': '001', 'description': 'Unit 001'},
'birth_year': 1980,
'percentile_trr': '33.3333',
'percentile_allegation_internal': '22.2222',
'percentile_allegation_civilian': '11.1111',
'percentile_allegation': '44.4444',
},
'subject_race': 'White',
'subject_gender': 'Male',
'subject_age': 37,
'force_category': 'Other',
'force_types': ['Verbal Commands'],
'date_of_incident': '2001-01-01',
'location_type': 'Factory',
'address': '34XX Douglas Blvd',
'beat': 1021,
})
def test_retrieve_not_found(self):
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': 123}))
expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)
def test_retrieve_missing_percentile(self):
officer = OfficerFactory(
civilian_allegation_percentile=None,
internal_allegation_percentile=None,
trr_percentile=None
)
trr = TRRFactory(officer=officer)
self.refresh_index()
response = self.client.get(reverse('api-v2:trr-detail', kwargs={'pk': trr.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
def test_request_document(self):
EmailTemplateFactory(type=TRR_ATTACHMENT_REQUEST)
TRRFactory(pk=112233)
response = self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': 112233}),
{'email': 'valid_email@example.com'}
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'message': 'Thanks for subscribing',
'trr_id': 112233
})
def test_request_same_document_twice(self):
EmailTemplateFactory(type=TRR_ATTACHMENT_REQUEST)
trr = TRRFactory(pk=112233)
self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': trr.id}),
{'email': 'valid_email@example.com'}
)
response2 = self.client.post(
reverse('api-v2:trr-request-document', kwargs={'pk': trr.id}),
{'email': 'valid_email@example.com'}
)
expect(response2.status_code).to.eq(status.HTTP_200_OK)
expect(response2.data).to.eq({
'message': 'Email already added',
'trr_id': 112233
})
def test_request_document_without_email(self):
TRRFactory(pk=321)
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}))
expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)
expect(response.data).to.eq({
'message': 'Please enter a valid email'
})
def test_request_document_with_invalid_email(self):
TRRFactory(pk=321)
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}),
{'email': 'invalid@email'})
expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)
expect(response.data).to.eq({
'message': 'Please enter a valid email'
})
def test_request_document_with_invalid_trr(self):
response = self.client.post(reverse('api-v2:trr-request-document', kwargs={'pk': 321}))
expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)
avg_line_length: 37.607438 | max_line_length: 109 | alphanum_fraction: 0.573893

hexsha: c4e0e97257d29e70d698ae5b5cdec0c74cebea04 | size: 1,562 | ext: py | lang: Python
max_stars: qinling/orchestrator/kubernetes/utils.py @ lingxiankong/qinling (head e18f80345ae519c9308cfc93fdf53b82c9be7618), licenses ["Apache-2.0"], count 2, events 2017-04-11T04:23:25.000Z to 2017-05-04T13:30:46.000Z
max_issues: qinling/orchestrator/kubernetes/utils.py @ lingxiankong/qinling (head e18f80345ae519c9308cfc93fdf53b82c9be7618), licenses ["Apache-2.0"], count 5, events 2019-08-14T06:46:03.000Z to 2021-12-13T20:01:25.000Z
max_forks: qinling/orchestrator/kubernetes/utils.py @ lingxiankong/qinling (head e18f80345ae519c9308cfc93fdf53b82c9be7618), licenses ["Apache-2.0"], count 2, events 2020-03-15T01:24:15.000Z to 2020-07-22T20:34:26.000Z
content:
# Copyright 2018 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kubernetes.client import api_client
# from kubernetes.client.apis import apps_v1_api
from kubernetes.client.apis import core_v1_api
from kubernetes.client.apis import extensions_v1beta1_api
from kubernetes.client import configuration as k8s_config
def get_k8s_clients(conf):
    config = k8s_config.Configuration()
    config.host = conf.kubernetes.kube_host
    if conf.kubernetes.use_api_certificate:
        config.ssl_ca_cert = conf.kubernetes.ssl_ca_cert
        config.cert_file = conf.kubernetes.cert_file
        config.key_file = conf.kubernetes.key_file
    else:
        config.verify_ssl = False
    client = api_client.ApiClient(configuration=config)
    v1 = core_v1_api.CoreV1Api(client)
    v1extension = extensions_v1beta1_api.ExtensionsV1beta1Api(client)
    # apps_v1 = apps_v1_api.AppsV1Api(client)
    clients = {
        'v1': v1,
        # 'apps_v1': apps_v1
        'v1extension': v1extension
    }
    return clients
avg_line_length: 36.325581 | max_line_length: 77 | alphanum_fraction: 0.736876

hexsha: d9636c815495f1cfade70c62d2391b3ca25abf86 | size: 18,106 | ext: py | lang: Python
max_stars: src/sage/categories/algebras_with_basis.py @ defeo/sage (head d8822036a9843bd4d75845024072515ede56bcb9), licenses ["BSL-1.0"], count 2, events 2018-06-30T01:37:35.000Z to 2018-06-30T01:37:39.000Z
max_issues: src/sage/categories/algebras_with_basis.py @ boothby/sage (head 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f), licenses ["BSL-1.0"], count null, events null
max_forks: src/sage/categories/algebras_with_basis.py @ boothby/sage (head 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f), licenses ["BSL-1.0"], count null, events null
content:
r"""
Algebras With Basis
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
# 2008-2013 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.lazy_import import LazyImport
from sage.categories.tensor import TensorProductsCategory, tensor
from sage.categories.cartesian_product import CartesianProductsCategory
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from .unital_algebras import UnitalAlgebras
class AlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
"""
The category of algebras with a distinguished basis.
EXAMPLES::
sage: C = AlgebrasWithBasis(QQ); C
Category of algebras with basis over Rational Field
sage: sorted(C.super_categories(), key=str)
[Category of algebras over Rational Field,
Category of unital algebras with basis over Rational Field]
We construct a typical parent in this category, and do some
computations with it::
sage: A = C.example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.category()
Category of algebras with basis over Rational Field
sage: A.one_basis()
word:
sage: A.one()
B[word: ]
sage: A.base_ring()
Rational Field
sage: A.basis().keys()
Finite words over {'a', 'b', 'c'}
sage: (a,b,c) = A.algebra_generators()
sage: a^3, b^2
(B[word: aaa], B[word: bb])
sage: a*c*b
B[word: acb]
sage: A.product
<bound method FreeAlgebra_with_category._product_from_product_on_basis_multiply of
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field>
sage: A.product(a*b,b)
B[word: abb]
sage: TestSuite(A).run(verbose=True)
running ._test_additive_associativity() . . . pass
running ._test_an_element() . . . pass
running ._test_associativity() . . . pass
running ._test_cardinality() . . . pass
running ._test_category() . . . pass
running ._test_characteristic() . . . pass
running ._test_distributivity() . . . pass
running ._test_elements() . . .
Running the test suite of self.an_element()
running ._test_category() . . . pass
running ._test_eq() . . . pass
running ._test_nonzero_equal() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_pickling() . . . pass
pass
running ._test_elements_eq_reflexive() . . . pass
running ._test_elements_eq_symmetric() . . . pass
running ._test_elements_eq_transitive() . . . pass
running ._test_elements_neq() . . . pass
running ._test_eq() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_one() . . . pass
running ._test_pickling() . . . pass
running ._test_prod() . . . pass
running ._test_some_elements() . . . pass
running ._test_zero() . . . pass
sage: A.__class__
<class 'sage.categories.examples.algebras_with_basis.FreeAlgebra_with_category'>
sage: A.element_class
<class 'sage.combinat.free_module.FreeAlgebra_with_category.element_class'>
Please see the source code of `A` (with ``A??``) for how to
implement other algebras with basis.
TESTS::
sage: TestSuite(AlgebrasWithBasis(QQ)).run()
"""
def example(self, alphabet = ('a','b','c')):
"""
Return an example of algebra with basis.
EXAMPLES::
sage: AlgebrasWithBasis(QQ).example()
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
Another set of generators can be specified as an optional argument::
sage: AlgebrasWithBasis(QQ).example((1,2,3))
An example of an algebra with basis: the free algebra on the generators (1, 2, 3) over Rational Field
"""
from sage.categories.examples.algebras_with_basis import Example
return Example(self.base_ring(), alphabet)
Filtered = LazyImport('sage.categories.filtered_algebras_with_basis', 'FilteredAlgebrasWithBasis')
FiniteDimensional = LazyImport('sage.categories.finite_dimensional_algebras_with_basis', 'FiniteDimensionalAlgebrasWithBasis')
Graded = LazyImport('sage.categories.graded_algebras_with_basis', 'GradedAlgebrasWithBasis')
Super = LazyImport('sage.categories.super_algebras_with_basis', 'SuperAlgebrasWithBasis')
class ParentMethods:
# For backward compatibility
one = UnitalAlgebras.WithBasis.ParentMethods.one
# Backward compatibility temporary cruft to help migrating form CombinatorialAlgebra
def _product_from_combinatorial_algebra_multiply(self,left,right):
"""
Returns left\*right where left and right are elements of self.
product() uses either _multiply or _multiply basis to carry out
the actual multiplication.
EXAMPLES::
sage: s = SymmetricFunctions(QQ).schur()
sage: a = s([2])
sage: s._product_from_combinatorial_algebra_multiply(a,a)
s[2, 2] + s[3, 1] + s[4]
sage: s.product(a,a)
s[2, 2] + s[3, 1] + s[4]
"""
A = left.parent()
BR = A.base_ring()
z_elt = {}
#Do the case where the user specifies how to multiply basis elements
if hasattr(self, '_multiply_basis'):
for (left_m, left_c) in left._monomial_coefficients.iteritems():
for (right_m, right_c) in right._monomial_coefficients.iteritems():
res = self._multiply_basis(left_m, right_m)
#Handle the case where the user returns a dictionary
#where the keys are the monomials and the values are
#the coefficients. If res is not a dictionary, then
#it is assumed to be an element of self
if not isinstance(res, dict):
if isinstance(res, self._element_class):
res = res._monomial_coefficients
else:
res = {res: BR(1)}
for m in res:
if m in z_elt:
z_elt[ m ] = z_elt[m] + left_c * right_c * res[m]
else:
z_elt[ m ] = left_c * right_c * res[m]
#We assume that the user handles the multiplication correctly on
#his or her own, and returns a dict with monomials as keys and
#coefficients as values
else:
m = self._multiply(left, right)
if isinstance(m, self._element_class):
return m
if not isinstance(m, dict):
z_elt = m.monomial_coefficients()
else:
z_elt = m
#Remove all entries that are equal to 0
BR = self.base_ring()
zero = BR(0)
del_list = []
for m, c in z_elt.iteritems():
if c == zero:
del_list.append(m)
for m in del_list:
del z_elt[m]
return self._from_dict(z_elt)
#def _test_product(self, **options):
# tester = self._tester(**options)
# tester.assert_(self.product is not None)
# could check that self.product is in Hom( self x self, self)
def hochschild_complex(self, M):
"""
Return the Hochschild complex of ``self`` with coefficients
in ``M``.
.. SEEALSO::
:class:`~sage.homology.hochschild_complex.HochschildComplex`
EXAMPLES::
sage: R.<x> = QQ[]
sage: A = algebras.DifferentialWeyl(R)
sage: H = A.hochschild_complex(A)
sage: SGA = SymmetricGroupAlgebra(QQ, 3)
sage: T = SGA.trivial_representation()
sage: H = SGA.hochschild_complex(T)
"""
from sage.homology.hochschild_complex import HochschildComplex
return HochschildComplex(self, M)
class ElementMethods:
def __invert__(self):
"""
Return the inverse of ``self`` if ``self`` is a multiple of one,
and one is in the basis of this algebra. Otherwise throws
an error.
Caveat: this generic implementation is not complete; there
may be invertible elements in the algebra that can't be
inverted this way. It is correct though for graded
connected algebras with basis.
.. WARNING::
This might produce a result which does not belong to
the parent of ``self``, yet believes to do so. For
instance, inverting 2 times the unity will produce 1/2
times the unity, even if 1/2 is not in the base ring.
Handle with care.
EXAMPLES::
sage: C = AlgebrasWithBasis(QQ).example()
sage: x = C(2); x
2*B[word: ]
sage: ~x
1/2*B[word: ]
sage: a = C.algebra_generators().first(); a
B[word: a]
sage: ~a
Traceback (most recent call last):
...
ValueError: cannot invert self (= B[word: a])
"""
# FIXME: make this generic
mcs = self.monomial_coefficients(copy=False)
one = self.parent().one_basis()
if len(mcs) == 1 and one in mcs:
return self.parent().term(one, ~mcs[one])
else:
raise ValueError("cannot invert self (= %s)"%self)
class CartesianProducts(CartesianProductsCategory):
"""
The category of algebras with basis, constructed as Cartesian
products of algebras with basis.
Note: this construction gives the direct products of algebras with basis.
See comment in :class:`Algebras.CartesianProducts
<sage.categories.algebras.Algebras.CartesianProducts>`
"""
def extra_super_categories(self):
"""
A Cartesian product of algebras with basis is endowed with
a natural algebra with basis structure.
EXAMPLES::
sage: AlgebrasWithBasis(QQ).CartesianProducts().extra_super_categories()
[Category of algebras with basis over Rational Field]
sage: AlgebrasWithBasis(QQ).CartesianProducts().super_categories()
[Category of algebras with basis over Rational Field,
Category of Cartesian products of algebras over Rational Field,
Category of Cartesian products of vector spaces with basis over Rational Field]
"""
return [self.base_category()]
class ParentMethods:
@cached_method
def one_from_cartesian_product_of_one_basis(self):
"""
Returns the one of this Cartesian product of algebras, as per ``Monoids.ParentMethods.one``
It is constructed as the Cartesian product of the ones of the
summands, using their :meth:`~AlgebrasWithBasis.ParentMethods.one_basis` methods.
This implementation does not require multiplication by
scalars nor calling cartesian_product. This might help keeping
things as lazy as possible upon initialization.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.one_basis()
word:
sage: B = cartesian_product((A, A, A))
sage: B.one_from_cartesian_product_of_one_basis()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
sage: B.one()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
sage: cartesian_product([SymmetricGroupAlgebra(QQ, 3), SymmetricGroupAlgebra(QQ, 4)]).one()
B[(0, [1, 2, 3])] + B[(1, [1, 2, 3, 4])]
"""
return self.sum_of_monomials( zip( self._sets_keys(), (set.one_basis() for set in self._sets)) )
@lazy_attribute
def one(self):
"""
TESTS::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: B = cartesian_product((A, A, A))
sage: B.one()
B[(0, word: )] + B[(1, word: )] + B[(2, word: )]
"""
if all(hasattr(module, "one_basis") for module in self._sets):
return self.one_from_cartesian_product_of_one_basis
else:
return NotImplemented
#def product_on_basis(self, t1, t2):
# would be easy to implement, but without a special
# version of module morphism, this would not take
# advantage of the bloc structure
class TensorProducts(TensorProductsCategory):
"""
The category of algebras with basis constructed by tensor product of algebras with basis
"""
@cached_method
def extra_super_categories(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).TensorProducts().extra_super_categories()
[Category of algebras with basis over Rational Field]
sage: AlgebrasWithBasis(QQ).TensorProducts().super_categories()
[Category of algebras with basis over Rational Field,
Category of tensor products of algebras over Rational Field,
Category of tensor products of vector spaces with basis over Rational Field]
"""
return [self.base_category()]
class ParentMethods:
"""
implements operations on tensor products of algebras with basis
"""
@cached_method
def one_basis(self):
"""
Returns the index of the one of this tensor product of
algebras, as per ``AlgebrasWithBasis.ParentMethods.one_basis``
It is the tuple whose operands are the indices of the
ones of the operands, as returned by their
:meth:`.one_basis` methods.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: A.one_basis()
word:
sage: B = tensor((A, A, A))
sage: B.one_basis()
(word: , word: , word: )
sage: B.one()
B[word: ] # B[word: ] # B[word: ]
"""
# FIXME: this method should be conditionaly defined,
# so that B.one_basis returns NotImplemented if not
# all modules provide one_basis
if all(hasattr(module, "one_basis") for module in self._sets):
return tuple(module.one_basis() for module in self._sets)
else:
raise NotImplementedError
def product_on_basis(self, t1, t2):
"""
The product of the algebra on the basis, as per
``AlgebrasWithBasis.ParentMethods.product_on_basis``.
EXAMPLES::
sage: A = AlgebrasWithBasis(QQ).example(); A
An example of an algebra with basis: the free algebra on the generators ('a', 'b', 'c') over Rational Field
sage: (a,b,c) = A.algebra_generators()
sage: x = tensor( (a, b, c) ); x
B[word: a] # B[word: b] # B[word: c]
sage: y = tensor( (c, b, a) ); y
B[word: c] # B[word: b] # B[word: a]
sage: x*y
B[word: ac] # B[word: bb] # B[word: ca]
sage: x = tensor( ((a+2*b), c) ) ; x
B[word: a] # B[word: c] + 2*B[word: b] # B[word: c]
sage: y = tensor( (c, a) ) + 1; y
B[word: ] # B[word: ] + B[word: c] # B[word: a]
sage: x*y
B[word: a] # B[word: c] + B[word: ac] # B[word: ca] + 2*B[word: b] # B[word: c] + 2*B[word: bc] # B[word: ca]
TODO: optimize this implementation!
"""
return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.
class ElementMethods:
"""
Implements operations on elements of tensor products of algebras with basis
"""
pass
avg_line_length: 41.718894 | max_line_length: 130 | alphanum_fraction: 0.548603

hexsha: 8c70882d2dffe6c41d9d6fd4c0f387399c60429b | size: 1,221 | ext: py | lang: Python
max_stars: tabular/agents/avgmin.py @ yooyoo9/randmin (head 0b59c2973e3a84c6d601c58ece9e77fb9d18ea66), licenses ["MIT"], count null, events null
max_issues: tabular/agents/avgmin.py @ yooyoo9/randmin (head 0b59c2973e3a84c6d601c58ece9e77fb9d18ea66), licenses ["MIT"], count null, events null
max_forks: tabular/agents/avgmin.py @ yooyoo9/randmin (head 0b59c2973e3a84c6d601c58ece9e77fb9d18ea66), licenses ["MIT"], count null, events null
content:
import numpy as np

from .agent import Agent


class AvgMinQlearning(Agent):
    def __init__(self, env, beta=0.5, discount=0.9, learning_rate=0.01, epsilon=0.1):
        super().__init__(env, discount, learning_rate, epsilon)
        self.name = "AvgMin" + str(beta)
        self.beta = beta
        self.q = np.random.uniform(low=-1, high=1, size=(self.n_states, self.n_actions))
        self.old_values = np.random.uniform(
            low=-1, high=1, size=(self.n_states, self.n_actions)
        )

    def choose_best_action(self, state):
        return np.argmax(self.old_values[state])

    def calculate_diff(self):
        return self.env.get_result(self.q, self.discount)

    def update(self, state, action, r, ns):
        q_estimate = np.max(self.old_values[ns])
        td_target = r + self.discount * q_estimate
        td_delta = td_target - self.q[state, action]
        self.q[state, action] += self.lr * td_delta
        if self.q[state, action] <= self.old_values[state, action]:
            self.old_values[state, action] = self.q[state, action]
        else:
            self.old_values[state, action] *= 1 - self.beta
            self.old_values[state, action] += self.beta * self.q[state, action]
avg_line_length: 38.15625 | max_line_length: 88 | alphanum_fraction: 0.629812

hexsha: e32c1132022597810ad95526bfacda7a084c2e04 | size: 32,969 | ext: py | lang: Python
max_stars: python_modules/dagster-graphql/dagster_graphql_tests/graphql/graphql_context_test_suite.py @ jrouly/dagster (head 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c), licenses ["Apache-2.0"], count null, events null
max_issues: python_modules/dagster-graphql/dagster_graphql_tests/graphql/graphql_context_test_suite.py @ jrouly/dagster (head 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c), licenses ["Apache-2.0"], count 1, events 2021-06-21T18:30:02.000Z to 2021-06-25T21:18:39.000Z
max_forks: python_modules/dagster-graphql/dagster_graphql_tests/graphql/graphql_context_test_suite.py @ jrouly/dagster (head 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c), licenses ["Apache-2.0"], count null, events null
content:
import tempfile
from abc import ABC, abstractmethod
from contextlib import contextmanager
import pytest
from dagster import check, file_relative_path
from dagster.cli.workspace import Workspace, WorkspaceProcessContext
from dagster.cli.workspace.cli_target import (
GrpcServerTarget,
ModuleTarget,
PythonFileTarget,
WorkspaceFileTarget,
)
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.event_log.sqlite import ConsolidatedSqliteEventLogStorage
from dagster.core.storage.local_compute_log_manager import LocalComputeLogManager
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.core.storage.schedules.sqlite.sqlite_schedule_storage import SqliteScheduleStorage
from dagster.core.test_utils import ExplodingRunLauncher, instance_for_test_tempdir
from dagster.grpc.server import GrpcServerProcess
from dagster.utils import merge_dicts
from dagster.utils.test import FilesystemTestScheduler
from dagster.utils.test.postgres_instance import TestPostgresInstance
def get_main_recon_repo():
return ReconstructableRepository.for_file(file_relative_path(__file__, "setup.py"), "test_repo")
@contextmanager
def graphql_postgres_instance(overrides):
with tempfile.TemporaryDirectory() as temp_dir:
with TestPostgresInstance.docker_service_up_or_skip(
file_relative_path(__file__, "docker-compose.yml"),
"test-postgres-db-graphql",
) as pg_conn_string:
TestPostgresInstance.clean_run_storage(pg_conn_string)
TestPostgresInstance.clean_event_log_storage(pg_conn_string)
TestPostgresInstance.clean_schedule_storage(pg_conn_string)
with instance_for_test_tempdir(
temp_dir,
overrides=merge_dicts(
{
"run_storage": {
"module": "dagster_postgres.run_storage.run_storage",
"class": "PostgresRunStorage",
"config": {"postgres_url": pg_conn_string},
},
"event_log_storage": {
"module": "dagster_postgres.event_log.event_log",
"class": "PostgresEventLogStorage",
"config": {"postgres_url": pg_conn_string},
},
"schedule_storage": {
"module": "dagster_postgres.schedule_storage.schedule_storage",
"class": "PostgresScheduleStorage",
"config": {"postgres_url": pg_conn_string},
},
"scheduler": {
"module": "dagster.utils.test",
"class": "FilesystemTestScheduler",
"config": {"base_dir": temp_dir},
},
},
overrides if overrides else {},
),
) as instance:
yield instance
class MarkedManager:
"""
MarkedManagers are passed to GraphQLContextVariants. They contain
a contextmanager function "manager_fn" that yields the relevant
instance, and it includes marks that will be applied to any
context-variant-driven test case that includes this MarkedManager.
See InstanceManagers for an example construction.
See GraphQLContextVariant for further information
"""
def __init__(self, manager_fn, marks):
self.manager_fn = check.callable_param(manager_fn, "manager_fn")
self.marks = check.list_param(marks, "marks")
class InstanceManagers:
@staticmethod
def in_memory_instance():
@contextmanager
def _in_memory_instance():
with tempfile.TemporaryDirectory() as temp_dir:
yield DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=InMemoryRunStorage(),
event_storage=InMemoryEventLogStorage(),
compute_log_manager=LocalComputeLogManager(temp_dir),
run_launcher=SyncInMemoryRunLauncher(),
run_coordinator=DefaultRunCoordinator(),
schedule_storage=SqliteScheduleStorage.from_local(temp_dir),
scheduler=FilesystemTestScheduler(temp_dir),
)
return MarkedManager(_in_memory_instance, [Marks.in_memory_instance])
@staticmethod
def non_launchable_in_memory_instance():
@contextmanager
def _non_launchable_in_memory_instance():
with tempfile.TemporaryDirectory() as temp_dir:
yield DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=InMemoryRunStorage(),
event_storage=InMemoryEventLogStorage(),
compute_log_manager=LocalComputeLogManager(temp_dir),
run_launcher=ExplodingRunLauncher(),
run_coordinator=DefaultRunCoordinator(),
schedule_storage=SqliteScheduleStorage.from_local(temp_dir),
scheduler=FilesystemTestScheduler(temp_dir),
)
return MarkedManager(
_non_launchable_in_memory_instance,
[Marks.in_memory_instance, Marks.non_launchable],
)
@staticmethod
def non_launchable_sqlite_instance():
@contextmanager
def _non_launchable_sqlite_instance():
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test_tempdir(
temp_dir,
overrides={
"scheduler": {
"module": "dagster.utils.test",
"class": "FilesystemTestScheduler",
"config": {"base_dir": temp_dir},
},
"run_launcher": {
"module": "dagster.core.test_utils",
"class": "ExplodingRunLauncher",
},
},
) as instance:
yield instance
return MarkedManager(
_non_launchable_sqlite_instance, [Marks.sqlite_instance, Marks.non_launchable]
)
@staticmethod
def non_launchable_postgres_instance():
@contextmanager
def _non_launchable_postgres_instance():
with graphql_postgres_instance(
overrides={
"run_launcher": {
"module": "dagster.core.test_utils",
"class": "ExplodingRunLauncher",
}
}
) as instance:
yield instance
return MarkedManager(
_non_launchable_postgres_instance,
[Marks.postgres_instance, Marks.non_launchable],
)
@staticmethod
def sqlite_instance_with_sync_run_launcher():
@contextmanager
def _sqlite_instance():
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test_tempdir(
temp_dir,
overrides={
"scheduler": {
"module": "dagster.utils.test",
"class": "FilesystemTestScheduler",
"config": {"base_dir": temp_dir},
},
"run_launcher": {
"module": "dagster.core.launcher.sync_in_memory_run_launcher",
"class": "SyncInMemoryRunLauncher",
},
},
) as instance:
yield instance
return MarkedManager(_sqlite_instance, [Marks.sqlite_instance, Marks.sync_run_launcher])
# Runs launched with this instance won't actually execute since the graphql test suite
# doesn't run the daemon process that launches queued runs
@staticmethod
def sqlite_instance_with_queued_run_coordinator():
@contextmanager
def _sqlite_instance():
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test_tempdir(
temp_dir,
overrides={
"scheduler": {
"module": "dagster.utils.test",
"class": "FilesystemTestScheduler",
"config": {"base_dir": temp_dir},
},
"run_coordinator": {
"module": "dagster.core.run_coordinator.queued_run_coordinator",
"class": "QueuedRunCoordinator",
},
},
) as instance:
yield instance
return MarkedManager(
_sqlite_instance, [Marks.sqlite_instance, Marks.queued_run_coordinator]
)
@staticmethod
def sqlite_instance_with_default_run_launcher():
@contextmanager
def _sqlite_instance_with_default_hijack():
with tempfile.TemporaryDirectory() as temp_dir:
with instance_for_test_tempdir(
temp_dir,
overrides={
"scheduler": {
"module": "dagster.utils.test",
"class": "FilesystemTestScheduler",
"config": {"base_dir": temp_dir},
},
"run_launcher": {
"module": "dagster",
"class": "DefaultRunLauncher",
"config": {
"wait_for_processes": True,
},
},
},
) as instance:
yield instance
return MarkedManager(
_sqlite_instance_with_default_hijack,
[Marks.sqlite_instance, Marks.default_run_launcher],
)
@staticmethod
def postgres_instance_with_sync_run_launcher():
@contextmanager
def _postgres_instance():
with graphql_postgres_instance(
overrides={
"run_launcher": {
"module": "dagster.core.launcher.sync_in_memory_run_launcher",
"class": "SyncInMemoryRunLauncher",
}
}
) as instance:
yield instance
return MarkedManager(
_postgres_instance,
[Marks.postgres_instance, Marks.sync_run_launcher],
)
@staticmethod
def postgres_instance_with_default_run_launcher():
@contextmanager
def _postgres_instance_with_default_hijack():
with graphql_postgres_instance(
overrides={
"run_launcher": {
"module": "dagster",
"class": "DefaultRunLauncher",
"config": {
"wait_for_processes": True,
},
},
}
) as instance:
yield instance
return MarkedManager(
_postgres_instance_with_default_hijack,
[Marks.postgres_instance, Marks.default_run_launcher],
)
@staticmethod
def consolidated_sqlite_instance():
@contextmanager
def _sqlite_asset_instance():
with tempfile.TemporaryDirectory() as temp_dir:
instance = DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=InMemoryRunStorage(),
event_storage=ConsolidatedSqliteEventLogStorage(temp_dir),
compute_log_manager=LocalComputeLogManager(temp_dir),
run_coordinator=DefaultRunCoordinator(),
run_launcher=SyncInMemoryRunLauncher(),
scheduler=FilesystemTestScheduler(temp_dir),
)
yield instance
return MarkedManager(_sqlite_asset_instance, [Marks.asset_aware_instance])
class EnvironmentManagers:
@staticmethod
def managed_grpc():
@contextmanager
def _mgr_fn(recon_repo):
"""Goes out of process via grpc"""
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
loadable_target_origin = recon_repo.get_python_origin().loadable_target_origin
with Workspace(
(
PythonFileTarget(
python_file=loadable_target_origin.python_file,
attribute=loadable_target_origin.attribute,
working_directory=loadable_target_origin.working_directory,
location_name="test",
)
if loadable_target_origin.python_file
else ModuleTarget(
module_name=loadable_target_origin.module_name,
attribute=loadable_target_origin.attribute,
location_name="test",
)
)
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.managed_grpc_env])
@staticmethod
def deployed_grpc():
@contextmanager
def _mgr_fn(recon_repo):
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
loadable_target_origin = recon_repo.get_python_origin().loadable_target_origin
server_process = GrpcServerProcess(loadable_target_origin=loadable_target_origin)
try:
with server_process.create_ephemeral_client() as api_client:
with Workspace(
GrpcServerTarget(
port=api_client.port,
socket=api_client.socket,
host=api_client.host,
location_name="test",
)
) as workspace:
yield workspace
finally:
server_process.wait()
return MarkedManager(_mgr_fn, [Marks.deployed_grpc_env])
@staticmethod
def multi_location():
@contextmanager
def _mgr_fn(recon_repo):
"""Goes out of process but same process as host process"""
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
with Workspace(
WorkspaceFileTarget(paths=[file_relative_path(__file__, "multi_location.yaml")])
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.multi_location])
@staticmethod
def lazy_repository():
@contextmanager
def _mgr_fn(recon_repo):
"""Goes out of process but same process as host process"""
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
with Workspace(
PythonFileTarget(
python_file=file_relative_path(__file__, "setup.py"),
attribute="test_dict_repo",
working_directory=None,
location_name="test",
)
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.lazy_repository])
class Marks:
# Instance type makes
in_memory_instance = pytest.mark.in_memory_instance
sqlite_instance = pytest.mark.sqlite_instance
postgres_instance = pytest.mark.postgres_instance
# Run launcher variants
sync_run_launcher = pytest.mark.sync_run_launcher
default_run_launcher = pytest.mark.default_run_launcher
queued_run_coordinator = pytest.mark.queued_run_coordinator
non_launchable = pytest.mark.non_launchable
# Repository Location marks
multi_location = pytest.mark.multi_location
managed_grpc_env = pytest.mark.managed_grpc_env
deployed_grpc_env = pytest.mark.deployed_grpc_env
lazy_repository = pytest.mark.lazy_repository
# Asset-aware sqlite variants
asset_aware_instance = pytest.mark.asset_aware_instance
# Backfill daemon variants
backfill_daemon = pytest.mark.backfill_daemon
# Readonly context variant
read_only = pytest.mark.read_only
# Common mark to all test suite tests
graphql_context_test_suite = pytest.mark.graphql_context_test_suite
def none_manager():
@contextmanager
def _yield_none(*_args, **_kwargs):
yield None
return MarkedManager(_yield_none, [])
class GraphQLContextVariant:
"""
An instance of this class represents a context variant that will be run
against *every* method in the test class, defined as a class
created by inheriting from make_graphql_context_test_suite.
It comes with a number of static methods with prebuilt context variants.
e.g. in_memory_in_process_start
One can also make bespoke context variants, provided you configure it properly
with MarkedMembers that produce its members.
Args:
marked_instance_mgr (MarkedManager): The manager_fn
within it must be a contextmanager that takes zero arguments and yields
a DagsterInstance
See InstanceManagers for examples
marked_environment_mgr (MarkedManager): The manager_fn within it
must be a contextmanager that takes a default ReconstructableRepo and
yields a list of RepositoryLocation.
See EnvironmentManagers for examples
test_id [Optional] (str): This assigns a test_id to test parameterized with this
variant. This is highly convenient for running a particular variant across
the entire test suite, without running all the other variants.
e.g.
pytest python_modules/dagster-graphql/dagster_graphql_tests/ -s -k in_memory_in_process_start
Will run all tests that use the in_memory_in_process_start, which will get a lot
of code coverage while being very fast to run.
All tests managed by this system are marked with "graphql_context_test_suite".
"""
def __init__(self, marked_instance_mgr, marked_environment_mgr, read_only=False, test_id=None):
self.marked_instance_mgr = check.inst_param(
marked_instance_mgr, "marked_instance_mgr", MarkedManager
)
self.marked_environment_mgr = check.inst_param(
marked_environment_mgr, "marked_environment_mgr", MarkedManager
)
self.read_only = check.bool_param(read_only, "read_only")
self.test_id = check.opt_str_param(test_id, "test_id")
self.marks = (
marked_instance_mgr.marks
+ marked_environment_mgr.marks
+ ([Marks.read_only] if read_only else [])
)
@property
def instance_mgr(self):
return self.marked_instance_mgr.manager_fn
@property
def environment_mgr(self):
return self.marked_environment_mgr.manager_fn
@staticmethod
def in_memory_instance_managed_grpc_env():
"""
Good for tests with read-only metadata queries. Does not work
if you have to go through the run launcher.
"""
return GraphQLContextVariant(
InstanceManagers.in_memory_instance(),
EnvironmentManagers.managed_grpc(),
test_id="in_memory_instance_managed_grpc_env",
)
@staticmethod
def sqlite_with_queued_run_coordinator_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.sqlite_instance_with_queued_run_coordinator(),
EnvironmentManagers.managed_grpc(),
test_id="sqlite_with_queued_run_coordinator_managed_grpc_env",
)
@staticmethod
def sqlite_with_default_run_launcher_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.sqlite_instance_with_default_run_launcher(),
EnvironmentManagers.managed_grpc(),
test_id="sqlite_with_default_run_launcher_managed_grpc_env",
)
@staticmethod
def sqlite_read_only_with_default_run_launcher_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.sqlite_instance_with_default_run_launcher(),
EnvironmentManagers.managed_grpc(),
read_only=True,
test_id="sqlite_read_only_with_default_run_launcher_managed_grpc_env",
)
@staticmethod
def sqlite_with_default_run_launcher_deployed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.sqlite_instance_with_default_run_launcher(),
EnvironmentManagers.deployed_grpc(),
test_id="sqlite_with_default_run_launcher_deployed_grpc_env",
)
@staticmethod
def postgres_with_default_run_launcher_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.postgres_instance_with_default_run_launcher(),
EnvironmentManagers.managed_grpc(),
test_id="postgres_with_default_run_launcher_managed_grpc_env",
)
@staticmethod
def postgres_with_default_run_launcher_deployed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.postgres_instance_with_default_run_launcher(),
EnvironmentManagers.deployed_grpc(),
test_id="postgres_with_default_run_launcher_deployed_grpc_env",
)
@staticmethod
def non_launchable_sqlite_instance_multi_location():
return GraphQLContextVariant(
InstanceManagers.non_launchable_sqlite_instance(),
EnvironmentManagers.multi_location(),
test_id="non_launchable_sqlite_instance_multi_location",
)
@staticmethod
def non_launchable_sqlite_instance_lazy_repository():
return GraphQLContextVariant(
InstanceManagers.non_launchable_sqlite_instance(),
EnvironmentManagers.lazy_repository(),
test_id="non_launchable_sqlite_instance_lazy_repository",
)
@staticmethod
def non_launchable_sqlite_instance_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.non_launchable_sqlite_instance(),
EnvironmentManagers.managed_grpc(),
test_id="non_launchable_sqlite_instance_managed_grpc_env",
)
@staticmethod
def non_launchable_sqlite_instance_deployed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.non_launchable_sqlite_instance(),
EnvironmentManagers.deployed_grpc(),
test_id="non_launchable_sqlite_instance_deployed_grpc_env",
)
@staticmethod
def non_launchable_postgres_instance_multi_location():
return GraphQLContextVariant(
InstanceManagers.non_launchable_postgres_instance(),
EnvironmentManagers.multi_location(),
test_id="non_launchable_postgres_instance_multi_location",
)
@staticmethod
def non_launchable_postgres_instance_lazy_repository():
return GraphQLContextVariant(
InstanceManagers.non_launchable_postgres_instance(),
EnvironmentManagers.lazy_repository(),
test_id="non_launchable_postgres_instance_lazy_repository",
)
@staticmethod
def non_launchable_postgres_instance_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.non_launchable_postgres_instance(),
EnvironmentManagers.managed_grpc(),
test_id="non_launchable_postgres_instance_managed_grpc_env",
)
@staticmethod
def non_launchable_in_memory_instance_multi_location():
return GraphQLContextVariant(
InstanceManagers.non_launchable_in_memory_instance(),
EnvironmentManagers.multi_location(),
test_id="non_launchable_in_memory_instance_multi_location",
)
@staticmethod
def non_launchable_in_memory_instance_lazy_repository():
return GraphQLContextVariant(
InstanceManagers.non_launchable_in_memory_instance(),
EnvironmentManagers.lazy_repository(),
test_id="non_launchable_in_memory_instance_lazy_repository",
)
@staticmethod
def non_launchable_in_memory_instance_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.non_launchable_in_memory_instance(),
EnvironmentManagers.managed_grpc(),
test_id="non_launchable_in_memory_instance_managed_grpc_env",
)
@staticmethod
def consolidated_sqlite_instance_managed_grpc_env():
return GraphQLContextVariant(
InstanceManagers.consolidated_sqlite_instance(),
EnvironmentManagers.managed_grpc(),
test_id="asset_aware_instance_in_process_env",
)
@staticmethod
def all_variants():
"""
There is a test case that keeps this up-to-date. If you add a static
method that returns a GraphQLContextVariant you have to add it to this
list in order for tests to pass.
"""
return [
GraphQLContextVariant.in_memory_instance_managed_grpc_env(),
GraphQLContextVariant.sqlite_with_default_run_launcher_managed_grpc_env(),
GraphQLContextVariant.sqlite_read_only_with_default_run_launcher_managed_grpc_env(),
GraphQLContextVariant.sqlite_with_default_run_launcher_deployed_grpc_env(),
GraphQLContextVariant.sqlite_with_queued_run_coordinator_managed_grpc_env(),
GraphQLContextVariant.postgres_with_default_run_launcher_managed_grpc_env(),
GraphQLContextVariant.postgres_with_default_run_launcher_deployed_grpc_env(),
GraphQLContextVariant.non_launchable_in_memory_instance_multi_location(),
GraphQLContextVariant.non_launchable_in_memory_instance_managed_grpc_env(),
GraphQLContextVariant.non_launchable_in_memory_instance_lazy_repository(),
GraphQLContextVariant.non_launchable_sqlite_instance_multi_location(),
GraphQLContextVariant.non_launchable_sqlite_instance_managed_grpc_env(),
GraphQLContextVariant.non_launchable_sqlite_instance_deployed_grpc_env(),
GraphQLContextVariant.non_launchable_sqlite_instance_lazy_repository(),
GraphQLContextVariant.non_launchable_postgres_instance_multi_location(),
GraphQLContextVariant.non_launchable_postgres_instance_managed_grpc_env(),
GraphQLContextVariant.non_launchable_postgres_instance_lazy_repository(),
GraphQLContextVariant.consolidated_sqlite_instance_managed_grpc_env(),
]
@staticmethod
def all_executing_variants():
return [
GraphQLContextVariant.sqlite_with_default_run_launcher_managed_grpc_env(),
GraphQLContextVariant.sqlite_with_default_run_launcher_deployed_grpc_env(),
GraphQLContextVariant.postgres_with_default_run_launcher_managed_grpc_env(),
GraphQLContextVariant.postgres_with_default_run_launcher_deployed_grpc_env(),
]
@staticmethod
def all_readonly_variants():
"""
Return all read only variants. If you try to run any mutation these will error
"""
return _variants_with_mark(GraphQLContextVariant.all_variants(), pytest.mark.read_only)
@staticmethod
def all_non_launchable_variants():
"""
Return all non_launchable variants. If you try to start or launch these will error
"""
return _variants_with_mark(GraphQLContextVariant.all_variants(), pytest.mark.non_launchable)
@staticmethod
def all_multi_location_variants():
return _variants_with_mark(GraphQLContextVariant.all_variants(), pytest.mark.multi_location)
def _variants_with_mark(variants, mark):
def _yield_all():
for variant in variants:
if mark in variant.marks:
yield variant
return list(_yield_all())
def _variants_without_marks(variants, marks):
def _yield_all():
for variant in variants:
if all(mark not in variant.marks for mark in marks):
yield variant
return list(_yield_all())
@contextmanager
def manage_graphql_context(context_variant, recon_repo=None):
recon_repo = recon_repo if recon_repo else get_main_recon_repo()
with context_variant.instance_mgr() as instance:
with context_variant.environment_mgr(recon_repo) as workspace:
yield WorkspaceProcessContext(
instance=instance, workspace=workspace, read_only=context_variant.read_only
).create_request_context()
class _GraphQLContextTestSuite(ABC):
@abstractmethod
def yield_graphql_context(self, request):
pass
@abstractmethod
def recon_repo(self):
pass
@contextmanager
def graphql_context_for_request(self, request):
check.param_invariant(
isinstance(request.param, GraphQLContextVariant),
"request",
"params in fixture must be List[GraphQLContextVariant]",
)
with manage_graphql_context(request.param, self.recon_repo()) as graphql_context:
yield graphql_context
def graphql_context_variants_fixture(context_variants):
check.list_param(context_variants, "context_variants", of_type=GraphQLContextVariant)
def _wrap(fn):
return pytest.fixture(
name="graphql_context",
params=[
pytest.param(
context_variant,
id=context_variant.test_id,
marks=context_variant.marks + [Marks.graphql_context_test_suite],
)
for context_variant in context_variants
],
)(fn)
return _wrap
def make_graphql_context_test_suite(context_variants, recon_repo=None):
"""
Arguments:
runs (List[GraphQLContextVariant]): List of runs to run per test in this class.
recon_repo (ReconstructableRepository): Repository to run against. Defaults
to "define_repository" in setup.py
This is the base class factory for test suites in the dagster-graphql test.
The goal of this suite is to make it straightforward to run tests
against multiple graphql_contexts, have a coherent lifecycle for those
contexts.
GraphQLContextVariant has a number of static methods to provide common run configurations
as well as common groups of run configuration
One can also make bespoke GraphQLContextVariants which specific implementations
of DagsterInstance, RepositoryLocation, and so forth. See that class
for more details.
Example:
class TestAThing(
make_graphql_context_test_suite(
context_variants=[GraphQLContextVariant.in_memory_in_process_start()]
)
):
def test_graphql_context_exists(self, graphql_context):
assert graphql_context
"""
check.list_param(context_variants, "context_variants", of_type=GraphQLContextVariant)
recon_repo = check.inst_param(
recon_repo if recon_repo else get_main_recon_repo(), "recon_repo", ReconstructableRepository
)
class _SpecificTestSuiteBase(_GraphQLContextTestSuite):
@graphql_context_variants_fixture(context_variants=context_variants)
def yield_graphql_context(self, request):
with self.graphql_context_for_request(request) as graphql_context:
yield graphql_context
def recon_repo(self):
return recon_repo
return _SpecificTestSuiteBase
ReadonlyGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_readonly_variants()
)
NonLaunchableGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_non_launchable_variants()
)
ExecutingGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_executing_variants()
)
avg_line_length: 39.24881 | max_line_length: 100 | alphanum_fraction: 0.64597

hexsha: 23e20e29215fdc1357b1659c3e8007e62e310f12 | size: 3,687 | ext: py | lang: Python
max_stars: salt/states/nxos_upgrade.py @ Noah-Huppert/salt (head 998c382f5f2c3b4cbf7d96aa6913ada6993909b3), licenses ["Apache-2.0"], count 19, events 2016-01-29T14:37:52.000Z to 2022-03-30T18:08:01.000Z
max_issues: salt/states/nxos_upgrade.py @ Noah-Huppert/salt (head 998c382f5f2c3b4cbf7d96aa6913ada6993909b3), licenses ["Apache-2.0"], count 223, events 2016-03-02T16:39:41.000Z to 2022-03-03T12:26:35.000Z
max_forks: salt/states/nxos_upgrade.py @ Noah-Huppert/salt (head 998c382f5f2c3b4cbf7d96aa6913ada6993909b3), licenses ["Apache-2.0"], count 64, events 2016-02-04T19:45:26.000Z to 2021-12-15T02:02:31.000Z
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Manage NX-OS System Image Upgrades.
.. versionadded: xxxx.xx.x
:maturity: new
:platform: nxos
:codeauthor: Michael G Wiebe
For documentation on setting up the nxos proxy minion look in the documentation
for :mod:`salt.proxy.nxos<salt.proxy.nxos>`.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
__virtualname__ = "nxos"
__virtual_aliases__ = ("nxos_upgrade",)
log = logging.getLogger(__name__)
def __virtual__():
return __virtualname__
def image_running(name, system_image, kickstart_image=None, issu=True, **kwargs):
"""
Ensure the NX-OS system image is running on the device.
name
Name of the salt state task
system_image
Name of the system image file on bootflash:
kickstart_image
Name of the kickstart image file on bootflash:
This is not needed if the system_image is a combined system and
kickstart image
Default: None
issu
        Ensure the correct system is running on the device using an in-service
        software upgrade, or force a disruptive upgrade by setting the option
        to False.
        Default: True
timeout
Timeout in seconds for long running 'install all' upgrade command.
Default: 900
Examples:
.. code-block:: yaml
upgrade_software_image_n9k:
nxos.image_running:
- name: Ensure nxos.7.0.3.I7.5a.bin is running
- system_image: nxos.7.0.3.I7.5a.bin
- issu: True
upgrade_software_image_n7k:
nxos.image_running:
- name: Ensure n7000-s2-kickstart.8.0.1.bin is running
- kickstart_image: n7000-s2-kickstart.8.0.1.bin
- system_image: n7000-s2-dk9.8.0.1.bin
- issu: False
"""
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
if kickstart_image is None:
upgrade = __salt__["nxos.upgrade"](
system_image=system_image, issu=issu, **kwargs
)
else:
upgrade = __salt__["nxos.upgrade"](
system_image=system_image,
kickstart_image=kickstart_image,
issu=issu,
**kwargs
)
if upgrade["upgrade_in_progress"]:
ret["result"] = upgrade["upgrade_in_progress"]
ret["changes"] = upgrade["module_data"]
ret["comment"] = "NX-OS Device Now Being Upgraded - See Change Details Below"
elif upgrade["succeeded"]:
ret["result"] = upgrade["succeeded"]
ret["comment"] = "NX-OS Device Running Image: {}".format(_version_info())
else:
ret["comment"] = "Upgrade Failed: {}.".format(upgrade["error_data"])
return ret
def _version_info():
"""
Helper method to return running image version
"""
if "NXOS" in __grains__["nxos"]["software"]:
return __grains__["nxos"]["software"]["NXOS"]
elif "kickstart" in __grains__["nxos"]["software"]:
return __grains__["nxos"]["software"]["kickstart"]
else:
return "Unable to detect sofware version"
| 30.471074
| 85
| 0.653648
|
da1614e8482f3484ae56956d4bcb920b0ea5d074
| 80
|
py
|
Python
|
auth_token/version.py
|
cedel1/django-auth-token
|
fc108468de8952c05c067e37bd0aa024345d86e3
|
[
"BSD-3-Clause"
] | null | null | null |
auth_token/version.py
|
cedel1/django-auth-token
|
fc108468de8952c05c067e37bd0aa024345d86e3
|
[
"BSD-3-Clause"
] | null | null | null |
auth_token/version.py
|
cedel1/django-auth-token
|
fc108468de8952c05c067e37bd0aa024345d86e3
|
[
"BSD-3-Clause"
] | null | null | null |
VERSION = (0, 1, 5)
def get_version():
return '.'.join(map(str, VERSION))
| 13.333333
| 38
| 0.5875
|
1b714740635b967ba9761679d5bb67e90b1103f3
| 5,241
|
py
|
Python
|
welleng/visual.py
|
kwinkunks/welleng
|
d0669b9b5164671ff4861a4efd33666c3fc9758f
|
[
"Apache-2.0"
] | 1
|
2020-12-26T14:42:51.000Z
|
2020-12-26T14:42:51.000Z
|
welleng/visual.py
|
kwinkunks/welleng
|
d0669b9b5164671ff4861a4efd33666c3fc9758f
|
[
"Apache-2.0"
] | null | null | null |
welleng/visual.py
|
kwinkunks/welleng
|
d0669b9b5164671ff4861a4efd33666c3fc9758f
|
[
"Apache-2.0"
] | null | null | null |
import trimesh
from vedo import show, Box, Axes, trimesh2vedo, Lines, colorMap, Arrows, Text
import numpy as np
from .version import __version__ as VERSION
class World:
def __init__(
self,
bb_center,
length,
width,
height
):
self.bb_center = bb_center
self.length = length
self.width = width
self.height = height
self.world = Box(
bb_center,
length,
width,
height
).wireframe()
def plot(
data,
names=None,
colors=None,
lines=None,
targets=None,
arrows=None,
text=None
):
"""
    A vedo wrapper for quickly visualizing well trajectories for QAQC purposes.
Parameters
----------
data: a trimesh.Trimesh object or a list of trimesh.Trimesh
        objects or a trimesh.Scene object
names: list of strings (default: None)
A list of names, index aligned to the list of well meshes.
colors: list of strings (default: None)
        A list of one or more colors. If a single color is listed then it is
        applied to all meshes in data, otherwise the list of colors is
indexed to the list of meshes.
"""
if isinstance(data, trimesh.scene.scene.Scene):
meshes = [v for k, v in data.geometry.items()]
        if names is None:
names = list(data.geometry.keys())
# handle a single mesh being passed
elif isinstance(data, trimesh.Trimesh):
meshes = [data]
else:
meshes = data
if names is not None:
        assert len(names) == len(meshes), \
            "Names must be the same length as the meshes list, else None"
if colors is not None:
if len(colors) == 1:
colors = colors * len(names)
else:
assert len(colors) == len(names), \
"Colors must be length of meshes list, 1 else None"
meshes_vedo = []
for i, mesh in enumerate(meshes):
if i == 0:
vertices = np.array(mesh.vertices)
start_locations = np.array([mesh.vertices[0]])
else:
vertices = np.concatenate((vertices, np.array(mesh.vertices)), axis=0)
start_locations = np.concatenate((start_locations, np.array([mesh.vertices[0]])), axis=0)
# convert to vedo mesh
m_vedo = trimesh2vedo(mesh)
if colors is not None:
m_vedo.c(colors[i])
if names is not None:
m_vedo.flag(names[i])
meshes_vedo.append(m_vedo)
w = get_bb(vertices)
axes = get_axes(w.world)
# try and figure out a nice start camera position
pos = w.bb_center
vec1 = pos - [w.length, w.width, 0]
vec2 = np.array([vec1[1], vec1[0], 0])
pos_new = [pos[0], pos[1], -4000] + vec2 * 3
camera_opts = dict(
pos = pos_new,
focalPoint = pos,
viewup = [0, 0, -1]
)
show(
meshes_vedo,
w.world,
lines,
targets,
arrows,
axes,
bg='lightgrey',
bg2='lavender',
camera=camera_opts,
interactorStyle=10,
resetcam=True,
interactive=True,
verbose=True,
title=f'welleng {VERSION}'
)
def get_start_location(start_locations):
start_location = np.average(start_locations, axis=0)
start_location[2] = np.amin(start_locations[:,2], axis=0)
return start_location
def get_bb(vertices, min_size=[1000,1000,0]):
bb_max = np.amax(vertices, axis=0)
bb_min = np.amin(vertices, axis=0)
l, w, h = np.amax(np.array([(bb_max - bb_min), min_size]), axis=0)
bb_center = bb_min + np.array(bb_max - bb_min) / 2
world = World(
bb_center,
l,
w,
h
)
return world
# make a dictionary of axes options
def get_axes(world):
axes = Axes(
world,
xtitle='y: North (m)', # swap axis to plot correctly
ytitle='x: East (m)',
ztitle='z: TVD (m)',
xTitleJustify='bottom-right',
yTitleJustify='top-right',
zTitleJustify='top-right',
xyGrid2=True, xyGrid=False,
zxGrid=True, yzGrid2=True,
zxGridTransparent=True, yzGrid2Transparent=True,
yzGrid=False,
xLabelRotation=-1,
yLabelRotation=1,
zLabelRotation=1,
)
for a in axes.unpack(): # unpack the Assembly to access its elements
if 'title' in a.name or 'NumericLabel' in a.name: a.mirror('y')
if 'yNumericLabel' in a.name: a.scale(0.8)
return axes
def get_lines(clearance):
"""
Add lines per reference well interval between the closest points on the
reference well and the offset well and color them according to the
calculated Separation Factor (SF) between the two wells at these points.
Parameters
----------
clearance: welleng.clearance object
Returns
-------
lines: vedo.Lines object
A vedo.Lines object colored by the object's SF values.
"""
c = clearance.SF
start_points, end_points = clearance.get_lines()
lines = Lines(start_points, end_points).cmap('hot_r', c, on='cells')
lines.addScalarBar(title='SF')
return lines
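# Illustrative sketch (not part of the original module): quick visual check of a
# mesh with plot(). The icosphere below is only a stand-in for a well-trajectory
# mesh produced elsewhere in welleng.
if __name__ == "__main__":
    demo_mesh = trimesh.creation.icosphere(radius=500.0)
    plot([demo_mesh], names=["demo mesh"], colors=["red"])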
| 28.32973
| 101
| 0.583667
|
35d63b006dc10173a6c7f20de42e74e9c1c36928
| 1,896
|
py
|
Python
|
code/model_kit/rnn_layer.py
|
jiwoongim/IMsML
|
2f5647794b433c1cb63f14ba1bcbf4919ca48b87
|
[
"MIT"
] | null | null | null |
code/model_kit/rnn_layer.py
|
jiwoongim/IMsML
|
2f5647794b433c1cb63f14ba1bcbf4919ca48b87
|
[
"MIT"
] | null | null | null |
code/model_kit/rnn_layer.py
|
jiwoongim/IMsML
|
2f5647794b433c1cb63f14ba1bcbf4919ca48b87
|
[
"MIT"
] | null | null | null |
import math
import tensorflow as tf
from utils.nn_utils import init_weights
class RNNLayer(object):
def __init__(self, D, M, scope_name):
''' D - Dimension of the input
            M - Dimension of the output
scope_name - Name of the layer '''
self.D = D
self.M = M
self.scope_name = scope_name
        self._initialize_params()
    def __call__(self, X, H):
        '''Apply the layer to input X and previous hidden state H'''
        return self.fp(X, H)
def _initialize_params(self):
'''Initialize parameters in the layer'''
with tf.variable_scope(self.scope_name):
self.Wx = tf.get_variable("Wx", shape=[self.D, self.M], \
initializer=init_weights('xavier'))
hid2hid = [self.M, self.M]
self.Wh = tf.get_variable("Wx", shape=hid2hid, \
initializer=init_weights('identity')(hid2hid))
self.hbias = tf.get_variable("hbias", shape=[self.M], \
                            initializer=init_weights('zeros'))
self.params = [self.Wx, self.Wh, self.hbias]
    def copy(self, scope=None):
        '''Create a copy of this layer under a new variable scope'''
        if scope is None:
            scope = self.scope_name + "_copy"
        return RNNLayer(self.D, self.M, scope)
    def fp(self, X, H):
        '''Forward propagation: combine the input and the previous hidden state'''
        with tf.variable_scope(self.scope_name):
            logit = tf.matmul(X, self.Wx) + tf.matmul(H, self.Wh) + self.hbias
            # tanh is assumed here; the original nonlinearity was left undefined
            return tf.tanh(logit)
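# Illustrative usage sketch (not part of the original module; assumes TensorFlow 1.x
# graph mode and the project's utils.nn_utils on the path):
#
#     layer = RNNLayer(D=32, M=64, scope_name="rnn0")
#     h_t = layer.fp(x_t, h_prev)   # x_t: [batch, 32], h_prev: [batch, 64]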
| 27.882353
| 85
| 0.565401
|
7eb23048cbc5a7131397293223651429b172ba1a
| 4,791
|
py
|
Python
|
util/diff_generated_util_output.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 1,375
|
2019-11-05T15:11:00.000Z
|
2022-03-28T17:50:43.000Z
|
util/diff_generated_util_output.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 7,045
|
2019-11-05T16:05:45.000Z
|
2022-03-31T23:08:08.000Z
|
util/diff_generated_util_output.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 428
|
2019-11-05T15:00:20.000Z
|
2022-03-28T15:34:57.000Z
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Show a diff between the generated output from utils in the current working
tree versus a previous revision (HEAD by default). This makes it easy to
inspect the impact of modifications to either the utility implementation or
its input files on the generated output (e.g. HTML).
"""
import argparse
import os
import shlex
import subprocess
import sys
import tempfile
from reggen import version
# Test list format:
# output, outisdir, commandpre, commandpost
# if outisdir then it will be mkdired
# command is commandpre + fullpath_output + commandpost
testlist = [
["regdoc.md", False,
"./regtool.py --doc > ", ""],
["uart_rtl", True,
"./regtool.py -r -t ", " ../hw/ip/uart/data/uart.hjson"],
["uart_dv", True,
"./regtool.py -s -t ", " ../hw/ip/uart/data/uart.hjson"],
# generating include define headers
["uart.h", False,
"./regtool.py -D ../hw/ip/uart/data/uart.hjson > ", ""],
["gpio.h", False,
"./regtool.py -D ../hw/ip/gpio/data/gpio.hjson > ", ""],
["spi_device.h", False,
"./regtool.py -D ../hw/ip/spi_device/data/spi_device.hjson > ", ""]
] # yapf: disable
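# Illustrative example (not part of the original script): with the scheme above,
# the first testlist entry expands to a command of the form
#   ./regtool.py --doc > <outdir>/regdoc.md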
def generate_output(outdir, verbose):
for t in testlist:
out = shlex.quote(os.path.join(outdir, t[0]))
if t[1]:
# in new tmpdir so the directory should never be there already
os.mkdir(out)
errors_out = open(out + ".STDERR", 'w', encoding='UTF-8')
with errors_out:
err = subprocess.call(t[2] + out + t[3],
stderr=errors_out,
shell=True)
# write a file so it pops up in the diff
# if it is different
# (i.e. won't mention any that always return same error)
if err != 0:
rtn_out = open(out + ".RETURN", 'w', encoding='UTF-8')
with rtn_out:
rtn_out.write("Non-Zero Return code " + str(err) + "\n")
# useful for debug:
if (verbose):
subprocess.call("ls -l " + outdir, shell=True)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("treeish",
default="HEAD",
nargs="?",
help="git tree or commit to compare against")
parser.add_argument('--version',
action='store_true',
help='Show version and exit')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Verbose output: ls the output directories')
args = parser.parse_args()
if args.version:
version.show_and_exit(__file__, [])
args.treeish = shlex.quote(args.treeish)
util_path = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.abspath(os.path.join(util_path, os.pardir))
os.chdir(repo_root)
if not os.path.isdir(os.path.join(repo_root, '.git')):
print("Script not in expected location in a git repo", file=sys.stderr)
sys.exit(1)
# Exit early if there are no diffs between the working tree and
# args.treeish.
output = subprocess.check_output("git diff " + args.treeish, shell=True)
if not output:
sys.exit(0)
# Create temporary directories in util_path rather than defaulting to /tmp.
    # /tmp may be small and may be mounted noexec.
tempfile.tempdir = util_path
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir_basename = os.path.basename(tmpdir)
subprocess.check_call("git archive " + args.treeish +
" | tar -x -C util/" + tmpdir_basename,
shell=True)
# Execute commands for working tree, saving output
os.chdir(util_path)
newoutdir = os.path.join(tmpdir, "newout")
os.mkdir(newoutdir)
generate_output(newoutdir, args.verbose)
# Execute commands for previous revision, saving output
os.chdir(os.path.join(tmpdir_basename, "util"))
oldoutdir = os.path.join(tmpdir, "oldout")
os.mkdir(oldoutdir)
generate_output(oldoutdir, args.verbose)
# Show diff (if any)
os.chdir(tmpdir)
# Don't use a checked call because the exit code indicates whether there
# is a diff or not, rather than indicating error.
subprocess.call('git diff -p --stat --no-index oldout newout',
shell=True)
if __name__ == "__main__":
main()
| 36.022556
| 80
| 0.599457
|
59328e20a3f0d40a213ad5b26ce3234a851f6c51
| 51,307
|
py
|
Python
|
azure/storage/blob/blockblobservice.py
|
RobertoPrevato/azure-storage-python
|
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
|
[
"Apache-2.0"
] | 5
|
2018-03-21T12:59:53.000Z
|
2020-11-30T12:24:18.000Z
|
azure/storage/blob/blockblobservice.py
|
RobertoPrevato/azure-storage-python
|
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
|
[
"Apache-2.0"
] | null | null | null |
azure/storage/blob/blockblobservice.py
|
RobertoPrevato/azure-storage-python
|
fae8ed9916095cc1fc17ada44e6406f96f7bd11d
|
[
"Apache-2.0"
] | 3
|
2018-10-09T18:35:19.000Z
|
2019-03-13T09:43:02.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from io import (
BytesIO
)
from os import (
path,
)
from azure.storage.common._common_conversion import (
_encode_base64,
_to_str,
_int_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from azure.storage.common._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from azure.storage.common._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_required,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
_ERROR_VALUE_SHOULD_BE_STREAM
)
from azure.storage.common._http import HTTPRequest
from azure.storage.common._serialization import (
_get_request_body,
_get_data_bytes_only,
_get_data_bytes_or_stream_only,
_add_metadata_headers,
)
from azure.storage.common._serialization import (
_len_plus
)
from ._deserialization import (
_convert_xml_to_block_list,
_parse_base_properties,
)
from ._encryption import (
_encrypt_blob,
_generate_blob_encryption_data,
)
from ._serialization import (
_convert_block_list_to_xml,
_get_path,
)
from ._upload_chunking import (
_BlockBlobChunkUploader,
_upload_blob_chunks,
_upload_blob_substream_blocks,
)
from .baseblobservice import BaseBlobService
from .models import (
_BlobTypes,
)
class BlockBlobService(BaseBlobService):
'''
Block blobs let you upload large blobs efficiently. Block blobs are comprised
of blocks, each of which is identified by a block ID. You create or modify a
block blob by writing a set of blocks and committing them by their block IDs.
Each block can be a different size, up to a maximum of 4 MB, and a block blob
can include up to 50,000 blocks. The maximum size of a block blob is therefore
slightly more than 195 GB (4 MB X 50,000 blocks). If you are writing a block
blob that is no more than 64 MB in size, you can upload it in its entirety with
a single write operation; see create_blob_from_bytes.
:ivar int MAX_SINGLE_PUT_SIZE:
The largest size upload supported in a single put call. This is used by
the create_blob_from_* methods if the content length is known and is less
than this value.
:ivar int MAX_BLOCK_SIZE:
The size of the blocks put by create_blob_from_* methods if the content
length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks
may be put. The maximum block size the service supports is 100MB.
:ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD:
        The minimum block size at which the memory-optimized block upload
algorithm is considered. This algorithm is only applicable to the create_blob_from_file and
create_blob_from_stream methods and will prevent the full buffering of blocks.
In addition to the block size, ContentMD5 validation and Encryption must be disabled as
these options require the blocks to be buffered.
'''
MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024
MAX_BLOCK_SIZE = 4 * 1024 * 1024
MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
            If neither account key nor sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
:param int socket_timeout:
If specified, this will override the default socket timeout. The timeout specified is in seconds.
See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
'''
self.blob_type = _BlobTypes.BlockBlob
super(BlockBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string, socket_timeout)
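    # Illustrative usage sketch (not part of the original module): in this async
    # variant the convenience APIs are coroutines, so they are awaited; the account,
    # container, and file names below are hypothetical.
    #
    #     service = BlockBlobService(account_name='myaccount', account_key='...')
    #     await service.create_blob_from_path('mycontainer', 'report.bin', '/tmp/report.bin')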
def put_block(self, container_name, blob_name, block, block_id,
validate_content=False, lease_id=None, timeout=None):
'''
Creates a new block to be committed as part of a blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
        :param block:
            Content of the block.
        :type block: io.IOBase or bytes
:param str block_id:
A valid Base64 string value that identifies the block. Prior to
encoding, the string must be less than or equal to 64 bytes in size.
For a given blob, the length of the value specified for the blockid
parameter must be the same size for each block. Note that the Base64
string must be URL-encoded.
:param bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
self._put_block(
container_name,
blob_name,
block,
block_id,
validate_content=validate_content,
lease_id=lease_id,
timeout=timeout
)
def put_block_list(
self, container_name, blob_name, block_list, content_settings=None,
metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None,
timeout=None):
'''
Writes a blob by specifying the list of block IDs that make up the blob.
In order to be written as part of a blob, a block must have been
successfully written to the server in a prior Put Block operation.
You can call Put Block List to update a blob by uploading only those
blocks that have changed, then committing the new and existing blocks
together. You can do this by specifying whether to commit a block from
the committed block list or from the uncommitted block list, or to commit
the most recently uploaded version of the block, whichever list it may
belong to.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param block_list:
            A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block ids and block state.
:type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`)
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash of the block list content. The storage
service checks the hash of the block list content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this check is associated with
the block list content, and not with the content of the blob itself.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._put_block_list(
container_name,
blob_name,
block_list,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def get_block_list(self, container_name, blob_name, snapshot=None,
block_list_type=None, lease_id=None, timeout=None):
'''
Retrieves the list of blocks that have been uploaded as part of a
block blob. There are two block lists maintained for a blob:
Committed Block List:
The list of blocks that have been successfully committed to a
given blob with Put Block List.
Uncommitted Block List:
The list of blocks that have been uploaded for a blob using
Put Block, but that have not yet been committed. These blocks
are stored in Azure in association with a blob, but do not yet
form part of the blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
Datetime to determine the time to retrieve the blocks.
:param str block_list_type:
Specifies whether to return the list of committed blocks, the list
of uncommitted blocks, or both lists together. Valid values are:
committed, uncommitted, or all.
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: list committed and/or uncommitted blocks for Block Blob
:rtype: :class:`~azure.storage.blob.models.BlobBlockList`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'blocklist',
'snapshot': _to_str(snapshot),
'blocklisttype': _to_str(block_list_type),
'timeout': _int_to_str(timeout),
}
request.headers = {'x-ms-lease-id': _to_str(lease_id)}
return self._perform_request(request, _convert_xml_to_block_list)
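    # Illustrative sketch (not part of the original module) of the manual block upload
    # flow described above; the names, block id, and data are hypothetical.
    #
    #     service.put_block('mycontainer', 'myblob', chunk_bytes, 'block-000001')
    #     service.put_block_list('mycontainer', 'myblob', [BlobBlock(id='block-000001')])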
# ----Convenience APIs-----------------------------------------------------
async def create_blob_from_path(
self, container_name, blob_name, file_path, content_settings=None,
metadata=None, validate_content=False, progress_callback=None,
max_connections=2, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Creates a new blob from a file path, or updates the content of an
existing blob, with automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used, because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
return await self.create_blob_from_stream(
container_name=container_name,
blob_name=blob_name,
stream=stream,
count=count,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
progress_callback=progress_callback,
max_connections=max_connections,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout)
async def create_blob_from_stream(
self, container_name, blob_name, stream, count=None,
content_settings=None, metadata=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, use_byte_buffer=False):
'''
Creates a new blob from a file/stream, or updates the content of
an existing blob, with automatic chunking and progress
notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param io.IOBase stream:
Opened file/stream to upload as the blob content.
:param int count:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used, because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the blob size exceeds
64MB. Note that parallel upload requires the stream to be seekable.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param bool use_byte_buffer:
If True, this will force usage of the original full block buffering upload path.
By default, this value is False and will employ a memory-efficient,
streaming upload algorithm under the following conditions:
The provided stream is seekable, 'require_encryption' is False, and
MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD.
One should consider the drawbacks of using this approach. In order to achieve
            memory-efficiency, an IOBase stream or file-like object is segmented into logical blocks
using a SubStream wrapper. In order to read the correct data, each SubStream must acquire
a lock so that it can safely seek to the right position on the shared, underlying stream.
If max_connections > 1, the concurrency will result in a considerable amount of seeking on
the underlying stream. For the most common inputs such as a file-like stream object, seeking
is an inexpensive operation and this is not much of a concern. However, for other variants of streams
this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking
with your input stream.
The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of
seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks.
:return: ETag and last modified properties for the Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_encryption_required(self.require_encryption, self.key_encryption_key)
# Adjust count to include padding if we are expected to encrypt.
adjusted_count = count
if (self.key_encryption_key is not None) and (adjusted_count is not None):
adjusted_count += (16 - (count % 16))
# Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE
if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE):
if progress_callback:
progress_callback(0, count)
data = stream.read(count)
resp = await self._put_blob(
container_name=container_name,
blob_name=blob_name,
blob=data,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout)
if progress_callback:
progress_callback(count, count)
return resp
else: # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls
cek, iv, encryption_data = None, None, None
use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \
self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \
hasattr(stream, 'seekable') and not stream.seekable() or \
not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
if use_original_upload_path:
if self.key_encryption_key:
cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
block_ids = await _upload_blob_chunks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_BLOCK_SIZE,
stream=stream,
max_connections=max_connections,
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_BlockBlobChunkUploader,
timeout=timeout,
content_encryption_key=cek,
initialization_vector=iv
)
else:
block_ids = await _upload_blob_substream_blocks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_BLOCK_SIZE,
stream=stream,
max_connections=max_connections,
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_BlockBlobChunkUploader,
timeout=timeout,
)
return await self._put_block_list(
container_name=container_name,
blob_name=blob_name,
block_list=block_ids,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
encryption_data=encryption_data
)
async def create_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
content_settings=None, metadata=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Creates a new blob from an array of bytes, or updates the content
of an existing blob, with automatic chunking and progress
notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as an array of bytes.
:param int index:
Start index in the array of bytes.
:param int count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
if index < 0:
raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
return await self.create_blob_from_stream(
container_name=container_name,
blob_name=blob_name,
stream=stream,
count=count,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
progress_callback=progress_callback,
max_connections=max_connections,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
use_byte_buffer=True
)
async def create_blob_from_text(
self, container_name, blob_name, text, encoding='utf-8',
content_settings=None, metadata=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Creates a new blob from str/unicode, or updates the content of an
existing blob, with automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str text:
Text to upload to the blob.
:param str encoding:
Python encoding to use to convert the text to bytes.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
if not isinstance(text, bytes):
_validate_not_none('encoding', encoding)
text = text.encode(encoding)
return await self.create_blob_from_bytes(
container_name=container_name,
blob_name=blob_name,
blob=text,
index=0,
count=len(text),
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
progress_callback=progress_callback,
max_connections=max_connections,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout)
async def set_standard_blob_tier(
self, container_name, blob_name, standard_blob_tier, timeout=None):
'''
Sets the block blob tiers on the blob. This API is only supported for block blobs on standard storage accounts.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to update.
:param StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('standard_blob_tier', standard_blob_tier)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'tier',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-access-tier': _to_str(standard_blob_tier)
}
await self._perform_request(request)
# -----Helper methods------------------------------------
async def _put_blob(self, container_name, blob_name, blob, content_settings=None,
metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None,
timeout=None):
'''
Creates a blob or updates an existing blob.
See create_blob_from_* for high level
functions that handle the creation and upload of large blobs with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as bytes (size < 64MB). For larger size, you
must call put_block and put_block_list to set content of blob.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param metadata:
Name-value pairs associated with the blob as metadata.
:param bool validate_content:
If true, calculates an MD5 hash of the blob content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the new Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_encryption_required(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Host': f'{self.account_name}.blob.core.windows.net',
'Content-Type': 'application/octet-stream',
'x-ms-blob-type': _to_str(self.blob_type),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
blob = _get_data_bytes_only('blob', blob)
if self.key_encryption_key:
encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key)
request.headers['x-ms-meta-encryptiondata'] = encryption_data
request.body = blob
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
return await self._perform_request(request, _parse_base_properties)
async def _put_block(self, container_name, blob_name, block, block_id,
validate_content=False, lease_id=None, timeout=None):
'''
See put_block for more details. This helper method
allows for encryption or other such special behavior because
it is safely handled by the library. These behaviors are
prohibited in the public version of this function.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_not_none('block_id', block_id)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'block',
'blockid': _encode_base64(_to_str(block_id)),
'timeout': _int_to_str(timeout),
}
request.headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Host': f'{self.account_name}.blob.core.windows.net',
'Content-Type': 'application/octet-stream',
'x-ms-lease-id': _to_str(lease_id)
}
request.body = _get_data_bytes_or_stream_only('block', block)
if hasattr(request.body, 'read'):
if _len_plus(request.body) is None:
try:
data = b''
for chunk in iter(lambda: request.body.read(4096), b""):
data += chunk
request.body = data
except AttributeError:
raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body'))
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
await self._perform_request(request)
async def _put_block_list(
self, container_name, blob_name, block_list, content_settings=None,
metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None,
timeout=None, encryption_data=None):
'''
See put_block_list for more details. This helper method
allows for encryption or other such special behavior because
it is safely handled by the library. These behaviors are
prohibited in the public version of this function.
:param str encryption_data:
A JSON formatted string containing the encryption metadata generated for this
blob if it was encrypted all at once upon upload. This should only be passed
in by internal methods.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'blocklist',
'timeout': _int_to_str(timeout),
}
request.headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Host': f'{self.account_name}.blob.core.windows.net',
'Content-Type': 'application/octet-stream',
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
request.body = _get_request_body(_convert_block_list_to_xml(block_list))
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
if encryption_data is not None:
request.headers['x-ms-meta-encryptiondata'] = encryption_data
return await self._perform_request(request, _parse_base_properties)
| 50.104492
| 119
| 0.648547
|
5aee6b01c68f86ad24e2437a745c2e8a97a4eaa1
| 9,942
|
py
|
Python
|
TSS.Py/src/TpmBase.py
|
msftenhanceprovenance/TSS.MSR
|
03f5093cee0a607205cfc620c3c75cabf9499d61
|
[
"MIT"
] | 173
|
2019-05-13T14:32:06.000Z
|
2022-03-27T14:25:46.000Z
|
TSS.Py/src/TpmBase.py
|
msftenhanceprovenance/TSS.MSR
|
03f5093cee0a607205cfc620c3c75cabf9499d61
|
[
"MIT"
] | 76
|
2019-05-07T13:51:47.000Z
|
2022-03-14T18:58:56.000Z
|
TSS.Py/src/TpmBase.py
|
msftenhanceprovenance/TSS.MSR
|
03f5093cee0a607205cfc620c3c75cabf9499d61
|
[
"MIT"
] | 74
|
2019-05-10T14:08:08.000Z
|
2022-03-16T08:38:39.000Z
|
import platform
from .TpmTypes import *
from .TpmDevice import *
Owner = TPM_HANDLE(TPM_RH.OWNER)
Endorsement = TPM_HANDLE(TPM_RH.ENDORSEMENT)
NullSymDef = TPMT_SYM_DEF(TPM_ALG_ID.NULL, 0, TPM_ALG_ID.NULL)
class Session:
def __init__(self,
sessionHandle = None,
nonceTpm = None,
sessionAttributes = TPMA_SESSION.continueSession,
nonceCaller = None
):
self.SessIn = TPMS_AUTH_COMMAND(sessionHandle, nonceCaller, sessionAttributes)
self.SessOut = TPMS_AUTH_RESPONSE(nonceTpm, sessionAttributes)
@staticmethod
def Pw(authValue = None): # Session
s = Session()
s.SessIn.sessionHandle = TPM_HANDLE(TPM_RH.RS_PW)
s.SessIn.nonce = None
s.SessIn.sessionAttributes = TPMA_SESSION.continueSession
s.SessIn.hmac = authValue
s.SessOut.sessionAttributes = TPMA_SESSION.continueSession
return s
# class Session
NullPwSession = Session.Pw()
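# Illustrative sketch (not part of the original module): a command that needs
# authorization can be issued with an explicit password session, e.g.
#
#     tpm.withSession(Session.Pw(ownerAuth)).SomeAuthorizedCommand(...)
#
# where 'tpm', 'ownerAuth', and the command name are hypothetical.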
class TpmBase(object):
def __init__(self, useSimulator = False, host = '127.0.0.1', port = 2321):
if useSimulator:
self.__device = TpmTcpDevice(host, port)
elif platform.system() == 'Windows':
self.__device = TpmTbsDevice()
else:
self.__device = TpmLinuxDevice()
self.__lastResponseCode = TPM_RC.SUCCESS
self.__lastError = None # TpmError
self.enableExceptions(True)
self.__sessions = None
#self.__cmdBuf = None
self.__cmdCode = 0
def connect(self):
try:
self.__device.connect()
except Exception as e:
if isinstance(self.__device, TpmLinuxDevice):
# It is possible that a user mode TRM from tpm2-tools is running
self.__device = TpmTcpDevice('127.0.0.1', 2323, True)
self.__device.connect()
else:
raise
def close(self):
if self.__device:
self.__device.close()
self.__device = None
@property
def lastResponseCode(self):
return self.__lastResponseCode
@property
def lastError(self):
return self.__lastError
def allowErrors(self):
""" For the next TPM command invocation, errors will not cause an exception to be thrown
(use _lastCommandSucceeded or _getLastResponseCode() to check for an error)
Returns:
This object (to allow modifier chaining)
"""
self.__errorsAllowed = True
return self
def enableExceptions(self, enable = True):
""" When exceptions are enabled, errors reported by the TPM or occurred in the TSS (e.g. during
an attempt to communicate with the TPM) will result in throwing an exception of TpmError type.
It will still be possible to use _lastCommandSucceeded(), _getLastResponseCode() methods and
lastError property to check for an error after the exception is intercepted.
Note that in contrast to allowErrors() this method affects all subsequent commands.
"""
self.__exceptionsEnabled = enable
self.__errorsAllowed = not enable
def withSession(self, sess):
""" Specifies a single session handle to use with the next command
Args:
sess Session handle
Returns:
This object (to allow modifier chaining)
"""
self.__sessions = [sess]
return self
def withSessions(self, *sessions):
""" Specifies a single session handle to use with the next command
Args:
sessions Up to 3 session handles
Returns:
This object (to allow modifier chaining)
"""
self.__sessions = list(sessions)
return self
@staticmethod
def __isCommMediumError(respCode):
""" Checks whether the response code is generated by the TSS.JS (i.e. is an extension to the TPM 2.0 spec)
Args:
code Response code returned by TSS.JS
Returns:
true if the response code was generated in the communication channel between the app and the TPM
"""
return (respCode & 0xFFFF0000) == 0x80280000
@staticmethod
def __cleanResponseCode(rawResponse):
if TpmBase.__isCommMediumError(rawResponse):
return TPM_RC(rawResponse)
if rawResponse & TPM_RC.RC_FMT1:
mask = TPM_RC.RC_FMT1 | 0x3F
else:
mask = TPM_RC.RC_WARN | TPM_RC.RC_VER1 | 0x7F
return TPM_RC(rawResponse & mask)
def dispatchCommand(self,
cmdCode, # TPM_CC
req, # ReqStructure derived class
):
handles = req.getHandles()
numAuthHandles = req.numAuthHandles()
cmdBuf = TpmBuffer()
self.__cmdCode = cmdCode
self.__cmdTag = TPM_ST.SESSIONS if numAuthHandles > 0 else TPM_ST.NO_SESSIONS
cmdBuf.writeShort(self.__cmdTag)
cmdBuf.writeInt(0) # to be filled in later
cmdBuf.writeInt(cmdCode)
if handles and len(handles) > 0:
for h in handles:
if not h:
cmdBuf.writeInt(TPM_RH.NULL)
else:
h.toTpm(cmdBuf)
if numAuthHandles > 0:
if not self.__sessions:
self.__sessions = [None] * numAuthHandles
elif len(self.__sessions) < numAuthHandles:
self.__sessions = self.__sessions + [None] * (numAuthHandles - len(self.__sessions))
for i in range(numAuthHandles):
if not self.__sessions[i]:
self.__sessions[i] = Session.Pw()
# We do not know the size of the authorization area yet.
# Remember the place to marshal it, ...
authSizePos = cmdBuf.curPos
# ... and marshal a placeholder 0 value for now.
cmdBuf.writeInt(0)
for sess in self.__sessions:
sess.SessIn.toTpm(cmdBuf)
#authSize = cmdBuf.curPos - authSizePos - 4
#cmdBuf.buffer[authSizePos : authSizePos + 4] = intToTpm(authSize, 4)
cmdBuf.writeNumAtPos(cmdBuf.curPos - authSizePos - 4, authSizePos)
self.__sessions = None
self.__lastError = None
# Marshal command parameters
req.toTpm(cmdBuf)
# Fill in command buffer size in the command header
cmdBuf.writeNumAtPos(cmdBuf.curPos, 2)
cmdBuf.trim()
rc = TPM_RC.RETRY
while rc == TPM_RC.RETRY:
respBuf = self.__device.dispatchCommand(cmdBuf.buffer)
rc = intFromTpm(respBuf, 6, 4)
return TpmBuffer(respBuf)
# dispatchCommand()
@staticmethod
def __generateErrorResponse(rc):
respBuf = TpmBuffer()
respBuf.writeShort(TPM_ST.NO_SESSIONS)
respBuf.writeInt(10)
respBuf.writeInt(rc)
return respBuf
def __generateError(self, respCode, errMsg, errorsAllowed):
self.__lastResponseCode = respCode
self.__lastError = TpmError(respCode, self.__cmdCode, errMsg)
if self.__exceptionsEnabled and not errorsAllowed:
raise(self.__lastError)
return None
def processResponse(self, respBuf, RespType = None):
""" Returns unmarshaled response data structure or None in case of error """
if self.lastError:
return None
errorsAllowed = self.__errorsAllowed
self.__errorsAllowed = not self.__exceptionsEnabled
if respBuf.size < 10:
self.__generateError(TPM_RC.TSS_RESP_BUF_TOO_SHORT,
'Response buffer is too short: ' + str(respBuf.size), errorsAllowed)
return None
if respBuf.curPos != 0:
raise(Exception('Response buffer reading position is not properly initialized'))
tag = respBuf.readShort() # TPM_ST
respSize = respBuf.readInt()
rc = respBuf.readInt() # TPM_RC
self.__lastResponseCode = TpmBase.__cleanResponseCode(rc)
if (rc == TPM_RC.SUCCESS and tag != int(self.__cmdTag)
or rc != TPM_RC.SUCCESS and tag != int(TPM_ST.NO_SESSIONS)):
self.__generateError(TPM_RC.TSS_RESP_BUF_INVALID_SESSION_TAG,
'Invalid session tag in the response buffer', errorsAllowed)
return None
if self.__lastResponseCode != TPM_RC.SUCCESS:
self.__generateError(self.lastResponseCode, 'Command {' + str(self.__cmdCode) +
'} failed with error {' + str(self.lastResponseCode) + '}', errorsAllowed)
return None
if not RespType:
return None # No values are expected to be returned by the TPM
resp = RespType()
# Get the handles
if resp.numHandles() > 0:
resp.setHandle(TPM_HANDLE(respBuf.readInt()))
# If a response session is present, response buffer contains a field
# specifying the size of response parameters
respParamsSize = respBuf.readInt() if tag == TPM_ST.SESSIONS else respBuf.size - respBuf.curPos
paramStart = respBuf.curPos
resp.initFromTpm(respBuf)
if respParamsSize != respBuf.curPos - paramStart:
return self.__generateError(TPM_RC.TSS_RESP_BUF_INVALID_SIZE,
'Inconsistent TPM response params size: expected ${exp}, actual ${act}'
.format(exp = respParamsSize, act = respBuf.curPos - paramStart),
errorsAllowed)
return resp
# processResponse()
# class TpmBase
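# Hedged usage sketch (an assumption, not part of the original module): TpmBase is
# normally driven through a generated Tpm subclass; `SomeCommand` below is a stand-in
# for any generated command method.
#
#   tpm = Tpm(useSimulator=True)        # talk to a TPM simulator on localhost:2321
#   tpm.connect()
#   tpm.enableExceptions(True)          # TPM errors raise TpmError
#   tpm.allowErrors().SomeCommand(...)  # tolerate an error for one command, then
#                                       # inspect tpm.lastResponseCode / tpm.lastError
#   tpm.withSession(NullPwSession)      # attach a password session to the next command
#   tpm.close()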
| 36.822222
| 115
| 0.598069
|
def6a6283f564f0238ec04039cedcccf5b67fe3c
| 53,427
|
py
|
Python
|
tests/test_codebase/test_mmdet/test_mmdet_models.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 746
|
2021-12-27T10:50:28.000Z
|
2022-03-31T13:34:14.000Z
|
tests/test_codebase/test_mmdet/test_mmdet_models.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 253
|
2021-12-28T05:59:13.000Z
|
2022-03-31T18:22:25.000Z
|
tests/test_codebase/test_mmdet/test_mmdet_models.py
|
grimoire/mmdeploy
|
e84bc30f4a036dd19cb3af854203922a91098e84
|
[
"Apache-2.0"
] | 147
|
2021-12-27T10:50:33.000Z
|
2022-03-30T10:44:20.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import random
from typing import Dict, List
import mmcv
import numpy as np
import pytest
import torch
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.config_utils import get_ir_config
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
get_rewrite_outputs)
import_codebase(Codebase.MMDET)
def seed_everything(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = False
def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:
"""Converts output from a dictionary to a list.
The new list will contain only those output values, whose names are in list
'output_names'.
"""
outputs = [
value for name, value in rewrite_output.items() if name in output_names
]
return outputs
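# Illustrative example (added note, not part of the original tests): only values whose
# key appears in output_names are kept, in dictionary order.
#   >>> convert_to_list({'dets': 1, 'labels': 2, 'aux': 3}, ['dets', 'labels'])
#   [1, 2]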
def get_anchor_head_model():
"""AnchorHead Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import AnchorHead
model = AnchorHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def get_ssd_head_model():
"""SSDHead Config."""
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
from mmdet.models import SSDHead
model = SSDHead(
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=4,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
test_cfg=test_cfg)
model.requires_grad_(False)
return model
def get_fcos_head_model():
"""FCOS Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import FCOSHead
model = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def get_focus_backbone_model():
"""Backbone Focus Config."""
from mmdet.models.backbones.csp_darknet import Focus
model = Focus(3, 32)
model.requires_grad_(False)
return model
def get_l2norm_forward_model():
"""L2Norm Neck Config."""
from mmdet.models.necks.ssd_neck import L2Norm
model = L2Norm(16)
torch.nn.init.uniform_(model.weight)
model.requires_grad_(False)
return model
def get_rpn_head_model():
"""RPN Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
nms_pre=0,
max_per_img=100,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0))
from mmdet.models.dense_heads import RPNHead
model = RPNHead(in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def get_reppoints_head_model():
"""Reppoints Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import RepPointsHead
model = RepPointsHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def get_single_roi_extractor():
"""SingleRoIExtractor Config."""
from mmdet.models.roi_heads import SingleRoIExtractor
roi_layer = dict(type='RoIAlign', output_size=7, sampling_ratio=2)
out_channels = 1
featmap_strides = [4, 8, 16, 32]
model = SingleRoIExtractor(roi_layer, out_channels, featmap_strides).eval()
return model
def get_gfl_head_model():
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
anchor_generator = dict(
type='AnchorGenerator',
scales_per_octave=1,
octave_base_scale=8,
ratios=[1.0],
strides=[8, 16, 32, 64, 128])
from mmdet.models.dense_heads import GFLHead
model = GFLHead(
num_classes=3,
in_channels=256,
reg_max=3,
test_cfg=test_cfg,
anchor_generator=anchor_generator)
model.requires_grad_(False)
return model
def test_focus_forward_ncnn():
backend_type = Backend.NCNN
check_backend(backend_type)
focus_model = get_focus_backbone_model()
focus_model.cpu().eval()
s = 128
seed_everything(1234)
x = torch.rand(1, 3, s, s)
model_outputs = [focus_model.forward(x)]
wrapped_model = WrapModel(focus_model, 'forward')
rewrite_inputs = {
'x': x,
}
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_l2norm_forward(backend_type):
check_backend(backend_type)
l2norm_neck = get_l2norm_forward_model()
l2norm_neck.cpu().eval()
s = 128
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
seed_everything(1234)
feat = torch.rand(1, 16, s, s)
model_outputs = [l2norm_neck.forward(feat)]
wrapped_model = WrapModel(l2norm_neck, 'forward')
rewrite_inputs = {
'x': feat,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
else:
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output[0], rewrite_output, rtol=1e-03, atol=1e-05)
def test_get_bboxes_of_fcos_head_ncnn():
backend_type = Backend.NCNN
check_backend(backend_type)
fcos_head = get_fcos_head_model()
fcos_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, fcos_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
seed_everything(9101)
centernesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
fcos_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'centernesses': centernesses
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
def test_get_bboxes_of_rpn_head(backend_type: Backend):
check_backend(backend_type)
head = get_rpn_head_model()
head.cpu().eval()
s = 4
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
# do not run with ncnn backend
run_with_backend = False if backend_type in [Backend.NCNN] else True
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg,
run_with_backend=run_with_backend)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_get_bboxes_of_gfl_head(backend_type):
check_backend(backend_type)
head = get_gfl_head_model()
head.cpu().eval()
s = 4
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
cls_score = [
torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 16, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
# do not run with ncnn backend
run_with_backend = False if backend_type in [Backend.NCNN] else True
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg,
run_with_backend=run_with_backend)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_forward_of_gfl_head(backend_type):
check_backend(backend_type)
head = get_gfl_head_model()
head.cpu().eval()
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
feats = [torch.rand(1, 256, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
model_outputs = [head.forward(feats)]
wrapped_model = WrapModel(head, 'forward')
rewrite_inputs = {
'feats': feats,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
model_outputs[0] = [*model_outputs[0][0], *model_outputs[0][1]]
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
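# Illustrative example (added note, not part of the original tests): given a config
# fragment such as
#   cfg = mmcv.Config(dict(model=dict(
#       backbone=dict(type='ResNet', depth=50, base_channels=64),
#       neck=dict(in_channels=[256, 512, 1024, 2048]))))
#   small = _replace_r50_with_r18(cfg.model)
# the returned copy has depth=18, base_channels=2 and neck.in_channels=[2, 4, 8, 16],
# which keeps the test-time models tiny and fast.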
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('model_cfg_path', [
'tests/test_codebase/test_mmdet/data/single_stage_model.json',
'tests/test_codebase/test_mmdet/data/mask_model.json'
])
def test_forward_of_base_detector(model_cfg_path, backend):
check_backend(backend)
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend.value),
onnx_config=dict(
output_names=['dets', 'labels'], input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
model_cfg = mmcv.Config(dict(model=mmcv.load(model_cfg_path)))
model_cfg.model = _replace_r50_with_r18(model_cfg.model)
from mmdet.apis import init_detector
model = init_detector(model_cfg, None, 'cpu')
img = torch.randn(1, 3, 64, 64)
rewrite_inputs = {'img': img}
rewrite_outputs, _ = get_rewrite_outputs(
wrapped_model=model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_single_roi_extractor(backend_type: Backend):
check_backend(backend_type)
single_roi_extractor = get_single_roi_extractor()
output_names = ['roi_feat']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
)))
seed_everything(1234)
out_channels = single_roi_extractor.out_channels
feats = [
torch.rand((1, out_channels, 200, 336)),
torch.rand((1, out_channels, 100, 168)),
torch.rand((1, out_channels, 50, 84)),
torch.rand((1, out_channels, 25, 42)),
]
seed_everything(5678)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
model_inputs = {
'feats': feats,
'rois': rois,
}
model_outputs = get_model_outputs(single_roi_extractor, 'forward',
model_inputs)
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=single_roi_extractor,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
if isinstance(backend_outputs, dict):
backend_outputs = backend_outputs.values()
for model_output, backend_output in zip(model_outputs[0], backend_outputs):
model_output = model_output.squeeze().cpu().numpy()
backend_output = backend_output.squeeze()
assert np.allclose(
model_output, backend_output, rtol=1e-03, atol=1e-05)
def get_cascade_roi_head(is_instance_seg=False):
"""CascadeRoIHead Config."""
num_stages = 3
stage_loss_weights = [1, 0.5, 0.25]
bbox_roi_extractor = {
'type': 'SingleRoIExtractor',
'roi_layer': {
'type': 'RoIAlign',
'output_size': 7,
'sampling_ratio': 0
},
'out_channels': 64,
'featmap_strides': [4, 8, 16, 32]
}
all_target_stds = [[0.1, 0.1, 0.2, 0.2], [0.05, 0.05, 0.1, 0.1],
[0.033, 0.033, 0.067, 0.067]]
bbox_head = [{
'type': 'Shared2FCBBoxHead',
'in_channels': 64,
'fc_out_channels': 1024,
'roi_feat_size': 7,
'num_classes': 80,
'bbox_coder': {
'type': 'DeltaXYWHBBoxCoder',
'target_means': [0.0, 0.0, 0.0, 0.0],
'target_stds': target_stds
},
'reg_class_agnostic': True,
'loss_cls': {
'type': 'CrossEntropyLoss',
'use_sigmoid': False,
'loss_weight': 1.0
},
'loss_bbox': {
'type': 'SmoothL1Loss',
'beta': 1.0,
'loss_weight': 1.0
}
} for target_stds in all_target_stds]
mask_roi_extractor = {
'type': 'SingleRoIExtractor',
'roi_layer': {
'type': 'RoIAlign',
'output_size': 14,
'sampling_ratio': 0
},
'out_channels': 64,
'featmap_strides': [4, 8, 16, 32]
}
mask_head = {
'type': 'FCNMaskHead',
'num_convs': 4,
'in_channels': 64,
'conv_out_channels': 64,
'num_classes': 80,
'loss_mask': {
'type': 'CrossEntropyLoss',
'use_mask': True,
'loss_weight': 1.0
}
}
test_cfg = mmcv.Config(
dict(
score_thr=0.05,
nms=mmcv.Config(dict(type='nms', iou_threshold=0.5)),
max_per_img=100,
mask_thr_binary=0.5))
args = [num_stages, stage_loss_weights, bbox_roi_extractor, bbox_head]
kwargs = {'test_cfg': test_cfg}
if is_instance_seg:
args += [mask_roi_extractor, mask_head]
from mmdet.models.roi_heads import CascadeRoIHead
model = CascadeRoIHead(*args, **kwargs).eval()
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_cascade_roi_head(backend_type: Backend):
check_backend(backend_type)
cascade_roi_head = get_cascade_roi_head()
seed_everything(1234)
x = [
torch.rand((1, 64, 200, 304)),
torch.rand((1, 64, 100, 152)),
torch.rand((1, 64, 50, 76)),
torch.rand((1, 64, 25, 38)),
]
proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
img_metas = {
'img_shape': torch.tensor([800, 1216]),
'ori_shape': torch.tensor([800, 1216]),
'scale_factor': torch.tensor([1, 1, 1, 1])
}
model_inputs = {
'x': x,
'proposal_list': [proposals],
'img_metas': [img_metas]
}
output_names = ['results']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1))))
model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
wrapped_model = WrapModel(
cascade_roi_head, 'simple_test', img_metas=[img_metas])
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
assert backend_outputs is not None
def get_fovea_head_model():
"""FoveaHead Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import FoveaHead
model = FoveaHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_get_bboxes_of_fovea_head(backend_type: Backend):
check_backend(backend_type)
fovea_head = get_fovea_head_model()
fovea_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, fovea_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(fovea_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(fovea_head, 'get_bboxes', img_metas=img_metas)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
# hard code to make two tensors with the same shape
# rewrite and original codes applied different nms strategy
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.OPENVINO])
def test_cascade_roi_head_with_mask(backend_type: Backend):
check_backend(backend_type)
cascade_roi_head = get_cascade_roi_head(is_instance_seg=True)
seed_everything(1234)
x = [
torch.rand((1, 64, 200, 304)),
torch.rand((1, 64, 100, 152)),
torch.rand((1, 64, 50, 76)),
torch.rand((1, 64, 25, 38)),
]
proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
img_metas = {
'img_shape': torch.tensor([800, 1216]),
'ori_shape': torch.tensor([800, 1216]),
'scale_factor': torch.tensor([1, 1, 1, 1])
}
output_names = ['bbox_results', 'segm_results']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1))))
model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
wrapped_model = WrapModel(
cascade_roi_head, 'simple_test', img_metas=[img_metas])
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
bbox_results = backend_outputs[0]
segm_results = backend_outputs[1]
assert bbox_results is not None
assert segm_results is not None
def get_yolov3_head_model():
"""yolov3 Head Config."""
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
from mmdet.models.dense_heads import YOLOV3Head
model = YOLOV3Head(
num_classes=4,
in_channels=[16, 8, 4],
out_channels=[32, 16, 8],
test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_yolov3_head_get_bboxes(backend_type):
"""Test get_bboxes rewrite of yolov3 head."""
check_backend(backend_type)
yolov3_head = get_yolov3_head_model()
yolov3_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.45,
confidence_threshold=0.005,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
pred_maps = [
torch.rand(1, 27, 5, 5),
torch.rand(1, 27, 10, 10),
torch.rand(1, 27, 20, 20)
]
# to get outputs of pytorch model
model_inputs = {'pred_maps': pred_maps, 'img_metas': img_metas}
model_outputs = get_model_outputs(yolov3_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolov3_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'pred_maps': pred_maps,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
# hard code to make two tensors with the same shape
# rewrite and original codes applied different nms strategy
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def test_yolov3_head_get_bboxes_ncnn():
"""Test get_bboxes rewrite of yolov3 head."""
backend_type = Backend.NCNN
check_backend(backend_type)
yolov3_head = get_yolov3_head_model()
yolov3_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
model_type='ncnn_end2end',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.45,
confidence_threshold=0.005,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=10,
background_label_id=-1,
))))
seed_everything(1234)
pred_maps = [
torch.rand(1, 27, 5, 5),
torch.rand(1, 27, 10, 10),
torch.rand(1, 27, 20, 20)
]
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolov3_head, 'get_bboxes', img_metas=img_metas[0], with_nms=True)
rewrite_inputs = {
'pred_maps': pred_maps,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
def get_yolox_head_model():
"""YOLOX Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import YOLOXHead
model = YOLOXHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_yolox_head_get_bboxes(backend_type: Backend):
"""Test get_bboxes rewrite of YOLOXHead."""
check_backend(backend_type)
yolox_head = get_yolox_head_model()
yolox_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=20,
pre_top_k=-1,
keep_top_k=10,
background_label_id=-1,
))))
seed_everything(1234)
cls_scores = [
torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i))
for i in range(3, 0, -1)
]
seed_everything(5678)
bbox_preds = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(9101)
objectnesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
'img_metas': img_metas
}
model_outputs = get_model_outputs(yolox_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolox_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze().cpu().numpy()
# hard code to make two tensors with the same shape
# rewrite and original codes applied different nms strategy
min_shape = min(model_output.shape[0], rewrite_output.shape[0], 5)
assert np.allclose(
model_output[:min_shape],
rewrite_output[:min_shape],
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def test_yolox_head_get_bboxes_ncnn():
"""Test get_bboxes rewrite of yolox head for ncnn."""
backend_type = Backend.NCNN
check_backend(backend_type)
yolox_head = get_yolox_head_model()
yolox_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=20,
pre_top_k=5000,
keep_top_k=10,
background_label_id=0,
))))
seed_everything(1234)
cls_scores = [
torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i))
for i in range(3, 0, -1)
]
seed_everything(5678)
bbox_preds = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(9101)
objectnesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(yolox_head, 'get_bboxes', img_metas=img_metas)
rewrite_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
def get_vfnet_head_model():
"""VFNet Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import VFNetHead
model = VFNetHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
model.cpu().eval()
return model
@pytest.mark.parametrize('backend_type',
[Backend.OPENVINO, Backend.ONNXRUNTIME])
def test_get_bboxes_of_vfnet_head(backend_type: Backend):
"""Test get_bboxes rewrite of VFNet head."""
check_backend(backend_type)
vfnet_head = get_vfnet_head_model()
vfnet_head.cpu().eval()
s = 16
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
cls_score = [
torch.rand(1, vfnet_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
seed_everything(9101)
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(vfnet_head, 'get_bboxes', model_inputs)
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
vfnet_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {'cls_scores': cls_score, 'bbox_preds': bboxes}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
min_shape = min(model_output.shape[0], rewrite_output.shape[0])
assert np.allclose(
model_output[:min_shape],
rewrite_output[:min_shape],
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def get_deploy_cfg(backend_type: Backend, ir_type: str):
return mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(
type=ir_type,
output_names=['dets', 'labels'],
input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
@pytest.mark.parametrize('backend_type, ir_type',
[(Backend.ONNXRUNTIME, 'onnx'),
(Backend.OPENVINO, 'onnx'),
(Backend.TORCHSCRIPT, 'torchscript')])
def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
anchor_head = get_anchor_head_model()
anchor_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
deploy_cfg = get_deploy_cfg(backend_type, ir_type)
output_names = get_ir_config(deploy_cfg).get('output_names', None)
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(anchor_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
# hard code to make two tensors with the same shape
# rewrite and original codes applied different nms strategy
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def test_base_dense_head_get_bboxes__ncnn():
"""Test get_bboxes rewrite of base dense head."""
backend_type = Backend.NCNN
check_backend(backend_type)
anchor_head = get_anchor_head_model()
anchor_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
rewrite_outputs = rewrite_outputs[0]
assert rewrite_outputs.shape[-1] == 6
@pytest.mark.parametrize('is_dynamic', [True, False])
def test_ssd_head_get_bboxes__ncnn(is_dynamic: bool):
"""Test get_bboxes rewrite of ssd head for ncnn."""
check_backend(Backend.NCNN)
ssd_head = get_ssd_head_model()
ssd_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['output']
input_names = ['input']
dynamic_axes = None
if is_dynamic:
dynamic_axes = {
input_names[0]: {
2: 'height',
3: 'width'
},
output_names[0]: {
1: 'num_dets',
}
}
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=Backend.NCNN.value),
onnx_config=dict(
input_names=input_names,
output_names=output_names,
input_shape=None,
dynamic_axes=dynamic_axes),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# For the ssd_head:
# the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10),
# (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1)
# the bboxes's size: (1, 24, 20, 20), (1, 24, 10, 10),
# (1, 24, 5, 5), (1, 24, 3, 3), (1, 24, 2, 2), (1, 24, 1, 1)
feat_shape = [20, 10, 5, 3, 2, 1]
num_prior = 6
seed_everything(1234)
cls_score = [
torch.rand(1, 30, feat_shape[i], feat_shape[i])
for i in range(num_prior)
]
seed_everything(5678)
bboxes = [
torch.rand(1, 24, feat_shape[i], feat_shape[i])
for i in range(num_prior)
]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.tensor([s, s]) if is_dynamic else [s, s]
wrapped_model = WrapModel(
ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
rewrite_outputs = rewrite_outputs[0]
assert rewrite_outputs.shape[-1] == 6
@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')])
def test_reppoints_head_get_bboxes(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
dense_head = get_reppoints_head_model()
dense_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
deploy_cfg = get_deploy_cfg(backend_type, ir_type)
output_names = get_ir_config(deploy_cfg).get('output_names', None)
# the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2).
# the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(dense_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
dense_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
# hard code to make two tensors with the same shape
# rewrite and original codes applied different nms strategy
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')])
def test_reppoints_head_points2bbox(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
dense_head = get_reppoints_head_model()
dense_head.cpu().eval()
output_names = ['output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(
input_shape=None,
input_names=['pts'],
output_names=output_names)))
# the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2).
# the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2)
seed_everything(1234)
pts = torch.rand(1, 18, 16, 16)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(dense_head, 'points2bbox', y_first=True)
rewrite_inputs = {'pts': pts}
_ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
| 33.814557
| 79
| 0.587924
|
e4c3d1fd6f9e9d1b804c41169b4ade1404b14270
| 1,577
|
py
|
Python
|
src/chembl_beaker/beaker/core_apps/calculations/impl.py
|
chembl/chembl_beaker
|
0ea3a8efc9220fce5334cb37ac2239e7189b44ef
|
[
"Apache-2.0"
] | 28
|
2015-08-12T07:22:48.000Z
|
2022-02-26T03:50:27.000Z
|
src/chembl_beaker/beaker/core_apps/calculations/impl.py
|
chembl/chembl_beaker
|
0ea3a8efc9220fce5334cb37ac2239e7189b44ef
|
[
"Apache-2.0"
] | 18
|
2015-03-03T12:03:12.000Z
|
2021-06-16T11:03:05.000Z
|
src/chembl_beaker/beaker/core_apps/calculations/impl.py
|
chembl/chembl_beaker
|
0ea3a8efc9220fce5334cb37ac2239e7189b44ef
|
[
"Apache-2.0"
] | 13
|
2015-02-16T12:35:01.000Z
|
2021-08-02T09:40:30.000Z
|
from beaker.utils.io import _parseMolData
from chembl_structure_pipeline.standardizer import parse_molblock
from rdkit import Chem
import io
def _getSDFString(mols):
sio = io.StringIO()
_create_sdf(sio, mols)
return sio.getvalue()
def _create_sdf(f, mols):
for mol, props in mols:
m = Chem.MolToMolBlock(mol, kekulize=True)
if props:
m += props
f.write(m + '\n$$$$\n')
# ----------------------------------------------------------------------------------------------------------------------
def remove_hs_from_mol(m):
indices = []
for atom in m.GetAtoms():
if atom.GetAtomicNum() == 1 and not atom.GetIsotope():
bnd = atom.GetBonds()[0]
if not (bnd.GetBondDir() in (Chem.BondDir.BEGINWEDGE, Chem.BondDir.BEGINDASH)) and \
not (bnd.HasProp("_MolFileBondStereo") and bnd.GetUnsignedProp("_MolFileBondStereo") in (1, 6)):
indices.append(atom.GetIdx())
mol = Chem.RWMol(m)
for index in sorted(indices, reverse=True):
mol.RemoveAtom(index)
return mol
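# Hedged usage sketch (not part of the original module): only "plain" explicit
# hydrogens are dropped; isotopic hydrogens (D/T) and hydrogens on wedge/hash
# stereo bonds are kept.
#
#   mb = Chem.MolToMolBlock(Chem.AddHs(Chem.MolFromSmiles('CCO')))
#   m = parse_molblock(mb, useRDKitChemistry=False)
#   m.UpdatePropertyCache(strict=False)
#   print(remove_hs_from_mol(m).GetNumAtoms())   # 3 heavy atoms remain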
def _removeHs(data):
mols = _parseMolData(data, loadMol=False, useRDKitChemistry=False)
ms = []
for molblock in mols:
props = molblock.split("M END")[1].strip()
props = props if len(props) > 1 else None
mol = parse_molblock(molblock, useRDKitChemistry=False)
Chem.FastFindRings(mol)
mol.UpdatePropertyCache(strict=False)
mol = remove_hs_from_mol(mol)
ms.append((mol, props))
return _getSDFString(ms)
| 31.54
| 120
| 0.588459
|
b1f8498afd771c45b8551894072392ccf659627b
| 8,456
|
py
|
Python
|
scripts/pipeline/a01d_tst_feature_engineering.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline/a01d_tst_feature_engineering.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline/a01d_tst_feature_engineering.py
|
Iolaum/Phi1337
|
c73b01cb85c0187ed5c23c672d4f3d05a6934a9f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import os
import pandas as pd
import re
import nltk
import pickle
from nltk.stem.porter import *
from nltk.stem.snowball import SnowballStemmer
from unidecode import unidecode
stops = set(nltk.corpus.stopwords.words("english"))
def fixtypos(training_data):
# training_data to be given when called
with open("../../dataset/misstypo.p", 'rb') as f:
dic = pickle.load(f)
print("Started replacing typos in search terms")
print("This may take a while...")
training_data['search_term'] = training_data['search_term'].replace(dic)
return training_data
def tokenize_and_stem(text, return_text=False, remove_stop_words=True):
if isinstance(text, str):
# text = text.decode('utf-8')
stemmer = SnowballStemmer("english")
# first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
# stem every token (no tokens are filtered out at this step)
meaningful_words = [stemmer.stem(t) for t in tokens]
if remove_stop_words:
meaningful_words = [w for w in meaningful_words if w not in stops]
return " ".join(meaningful_words) if return_text else meaningful_words
return text
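# Illustrative example (not part of the original script): with stop-word removal
# enabled, Snowball stemming maps e.g.
#   tokenize_and_stem("The angled brackets are shining", return_text=True)
# to something like "angl bracket shine" - "the"/"are" are dropped as stop words
# and the remaining tokens are stemmed.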
def common_words(s1, s2):
words, cnt = s1.split(), 0
for word in words:
if s2.find(word) >= 0:
cnt += 1
return cnt
def find_common(txt):
try:
return common_words(txt.split('\t')[0], txt.split('\t')[1])
except:
return 0
def find_occurences(s1, s2):
return s2.count(s1)
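# Illustrative examples (not part of the original script):
#   common_words("angle bracket", "metal angle bracket")    == 2  # each word found in s2
#   find_occurences("angle bracket", "metal angle bracket") == 1  # whole-phrase count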
def brand_ratio(series1, series2):
new_series = []
for index, value_1 in series1.iteritems():
value_2 = series2.iloc[index]
if int(value_2) == 0:
new_series.append(value_1)
else:
new_series.append(value_1/value_2)
return pd.Series(new_series)
def word_count(val):
try:
return len(val.split())
except AttributeError:
# non-string values (e.g. NaN) contain no words
return 0
def preprocess_text(text):
if isinstance(text, str):
# text = re.sub(r'[^\x00-\x7f]', r'', text)
text = text.lower()
text = text.replace(" ", " ")
text = text.replace(",", "")
text = text.replace("$", " ")
text = text.replace("?", " ")
text = text.replace("-", " ")
text = text.replace("//", "/")
text = text.replace("..", ".")
text = text.replace(" / ", " ")
text = text.replace(" \\ ", " ")
text = text.replace(".", " . ")
text = re.sub(r"([0-9])( *)\.( *)([0-9])", r"\1.\4", text)
text = re.sub(r"([0-9]+)( *)(inches|inch|in|')\.?", r"\1in. ", text)
text = re.sub(r"([0-9]+)( *)(foot|feet|ft|'')\.?", r"\1ft. ", text)
text = re.sub(r"([0-9]+)( *)(pounds|pound|lbs|lb)\.?", r"\1lb. ", text)
text = re.sub(r"([0-9]+)( *)(square|sq) ?\.?(feet|foot|ft)\.?", r"\1sq.ft. ", text)
text = re.sub(r"([0-9]+)( *)(cubic|cu) ?\.?(feet|foot|ft)\.?", r"\1cu.ft. ", text)
text = re.sub(r"([0-9]+)( *)(gallons|gallon|gal)\.?", r"\1gal. ", text)
text = re.sub(r"([0-9]+)( *)(ounces|ounce|oz)\.?", r"\1oz. ", text)
text = re.sub(r"([0-9]+)( *)(centimeters|cm)\.?", r"\1cm. ", text)
text = re.sub(r"([0-9]+)( *)(milimeters|mm)\.?", r"\1mm. ", text)
text = text.replace("°", " degrees ")
text = re.sub(r"([0-9]+)( *)(degrees|degree)\.?", r"\1deg. ", text)
text = text.replace(" v ", " volts ")
text = re.sub(r"([0-9]+)( *)(volts|volt)\.?", r"\1volt. ", text)
text = re.sub(r"([0-9]+)( *)(watts|watt)\.?", r"\1watt. ", text)
text = re.sub(r"([0-9]+)( *)(amperes|ampere|amps|amp)\.?", r"\1amp. ", text)
return text
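# Illustrative example (not part of the original script): the regexes above
# normalise free-form measurements onto a small set of unit tokens, e.g.
#   preprocess_text("10 inches x 2 FEET, 5 gallons")
# becomes roughly "10in.  x 2ft.  5gal. " (lower-cased, comma stripped, units
# collapsed to in./ft./gal.).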
def preprocess_data():
if os.path.isfile("../../dataset/preprocessed_training_data_t.csv"):
print("Found Preprocessed DataFrame")
return pd.read_csv("../../dataset/preprocessed_training_data_t.csv")
else:
print("Preprocessing Started")
print("")
training_data = pd.read_csv("../../dataset/test.csv", encoding="ISO-8859-1")
print(training_data.isnull().sum())
descriptions = pd.read_csv("../../dataset/product_descriptions.csv", encoding="ISO-8859-1")
attributes = pd.read_csv("../../dataset/attributes.csv")
brands = attributes[attributes.name == "MFG Brand Name"][["product_uid", "value"]].rename(
columns={"value": "brand"})
training_data = fixtypos(training_data)
print("Preprocess Search Terms")
training_data['search_term'] = training_data['search_term'].map(
lambda i: tokenize_and_stem(preprocess_text(str(unidecode(i))), return_text=True, remove_stop_words=False))
print("Preprocess Titles")
training_data['product_title'] = training_data['product_title'].map(
lambda i: tokenize_and_stem(preprocess_text(str(unidecode(i))), return_text=True, remove_stop_words=True))
print("Preprocess Descriptions")
descriptions['product_description'] = descriptions['product_description'].map(
lambda i: tokenize_and_stem(preprocess_text(str(unidecode(i))), return_text=True, remove_stop_words=True))
#print(descriptions['product_description'])
print("Preprocess Brands")
brands['brand'] = brands['brand'].map(
lambda i: tokenize_and_stem(preprocess_text(re.sub(r'[^\x00-\x7f]', r'', str(i))), return_text=True, remove_stop_words=False))
print("Merge data with descriptions")
training_data = pd.merge(training_data, descriptions, how='left', on='product_uid')
print("Merge data with brands")
training_data = pd.merge(training_data, brands, how='left', on='product_uid')
training_data['info'] = training_data['search_term'] + "\t" + training_data['product_title'] + "\t" + \
training_data['product_description']
training_data.to_csv('../../dataset/preprocessed_training_data_t.csv')
print(training_data.isnull().sum())
return training_data
def feature_generation():
training_data = preprocess_data()
print(training_data)
print(training_data.isnull().sum())
print("")
print("Creating Feature Dataframe")
feature_df = pd.DataFrame(
columns=[
'search_term_length',
'search_word_count',
'title_word_count',
'desc_word_count',
'search_text_occurences_in_title',
'search_text_occurences_in_description',
'search_last_word_in_title',
'search_last_word_in_description',
'search_title_common_words',
'search_description_common_words',
'brand_word_count',
'search_brand_common_words',
'brand_rate',
'brands_numerical',
],
)
training_data['attr'] = training_data['search_term'] + "\t" + training_data['brand']
brands = pd.unique(training_data.brand.ravel())
d = {}
i = 1000
for s in brands:
d[s] = i
i += 3
def num_brand(val):
if val == "":
return 0
return d[val]
feature_df['search_term_length'] = training_data['search_term'].map(lambda i: len(i))
feature_df['search_word_count'] = training_data['search_term'].map(lambda i: len(i.split())).astype(np.int64)
feature_df['title_word_count'] = training_data['product_title'].map(lambda i: len(i.split())).astype(np.int64)
feature_df['desc_word_count'] = training_data['product_description'].map(lambda i: len(i.split())).astype(np.int64)
feature_df['search_text_occurences_in_title'] = training_data['info'].map(
lambda i: find_occurences(i.split('\t')[0], i.split('\t')[1]))
feature_df['search_text_occurences_in_description'] = training_data['info'].map(
lambda i: find_occurences(i.split('\t')[0], i.split('\t')[2]))
feature_df['search_last_word_in_title'] = training_data['info'].map(
lambda i: find_occurences(i.split('\t')[0].split(" ")[-1], i.split('\t')[1]))
feature_df['search_last_word_in_description'] = training_data['info'].map(
lambda i: find_occurences(i.split('\t')[0].split(" ")[-1], i.split('\t')[2]))
feature_df['search_title_common_words'] = training_data['info'].map(
lambda i: common_words(i.split('\t')[0], i.split('\t')[1]))
feature_df['search_description_common_words'] = training_data['info'].map(
lambda i: common_words(i.split('\t')[0], i.split('\t')[2]))
training_data['brand'] = training_data['brand'].fillna("")
training_data['attr'] = training_data['attr'].fillna("")
feature_df['brand_word_count'] = training_data['brand'].map(lambda i: word_count(i)).astype(np.int64)
feature_df['search_brand_common_words'] = training_data['attr'].map(
lambda i: find_common(i))
feature_df['brand_rate'] = brand_ratio(feature_df['search_brand_common_words'], feature_df['brand_word_count'])
feature_df['brands_numerical'] = training_data['brand'].map(lambda x: num_brand(x))
feature_df.to_csv('../../dataset/features_t.csv')
if __name__ == "__main__":
feature_generation()
| 34.942149
| 129
| 0.678217
|
540587e59baeeac5543ce28f661ed595a5a027fe
| 506
|
py
|
Python
|
examples/button_group.py
|
knowledgejunkie/guizero
|
6bd78630ae2b1a6b87c3eca59777c95e66af08d2
|
[
"BSD-3-Clause"
] | 2
|
2020-05-06T03:07:05.000Z
|
2020-06-28T05:27:48.000Z
|
examples/button_group.py
|
knowledgejunkie/guizero
|
6bd78630ae2b1a6b87c3eca59777c95e66af08d2
|
[
"BSD-3-Clause"
] | null | null | null |
examples/button_group.py
|
knowledgejunkie/guizero
|
6bd78630ae2b1a6b87c3eca59777c95e66af08d2
|
[
"BSD-3-Clause"
] | null | null | null |
from guizero import App, ButtonGroup
def selected():
print(choice.value + " " + choice2.value)
app = App()
choice = ButtonGroup(app, options=["cheese", "ham", "salad"], command=selected)
# You can use specific values for the button group by passing them as a 2d list.
# choice = ButtonGroup(app, options=[["cheese", "c"], ["ham", "h"], ["salad", "s"]], selected="h", command=selected)
choice2 = ButtonGroup(app, command=selected)
choice2.append("sandwich")
choice2.append("salad")
app.display()
| 33.733333
| 116
| 0.685771
|
a932f2616dd719e533e2c4c54c6db9003d885f09
| 148
|
py
|
Python
|
ftpdata/Directory.py
|
dropyourcoffee/ftpdata
|
b1c9e95d77aea66a3091d38052eada902dcfc4b5
|
[
"MIT"
] | null | null | null |
ftpdata/Directory.py
|
dropyourcoffee/ftpdata
|
b1c9e95d77aea66a3091d38052eada902dcfc4b5
|
[
"MIT"
] | null | null | null |
ftpdata/Directory.py
|
dropyourcoffee/ftpdata
|
b1c9e95d77aea66a3091d38052eada902dcfc4b5
|
[
"MIT"
] | null | null | null |
import os
class Directory:
def __init__(self, filepath):
self.filepath = filepath
self.filename = os.path.split(filepath)[-1]
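if __name__ == "__main__":
    # Hedged demo added for illustration only; not part of the original module.
    d = Directory("/srv/ftp/pub/readme.txt")
    print(d.filepath)   # /srv/ftp/pub/readme.txt
    print(d.filename)   # readme.txt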
| 18.5
| 51
| 0.655405
|
7e3c52353a2bfeeeffaed6c17d94c484cdc94ed1
| 7,415
|
py
|
Python
|
tests/test_server.py
|
vincentsarago/rio-glui
|
04b4d0fcfb3394aa2e994f5192e68aab6426dfcf
|
[
"MIT"
] | 42
|
2018-06-29T14:07:40.000Z
|
2022-03-10T08:43:32.000Z
|
tests/test_server.py
|
vincentsarago/rio-glui
|
04b4d0fcfb3394aa2e994f5192e68aab6426dfcf
|
[
"MIT"
] | 9
|
2018-07-02T12:41:16.000Z
|
2020-05-22T13:35:42.000Z
|
tests/test_server.py
|
vincentsarago/rio-glui
|
04b4d0fcfb3394aa2e994f5192e68aab6426dfcf
|
[
"MIT"
] | 11
|
2018-07-01T17:28:48.000Z
|
2021-11-12T08:18:30.000Z
|
"""tests rio_glui.server."""
import os
import numpy
from tornado.testing import AsyncHTTPTestCase
import mercantile
from rio_tiler.utils import tile_read
from rio_glui.raster import RasterTiles
from rio_glui.server import TileServer
raster_path = os.path.join(
os.path.dirname(__file__), "fixtures", "16-21560-29773_small_ycbcr.tif"
)
raster_ndvi_path = os.path.join(os.path.dirname(__file__), "fixtures", "ndvi_cogeo.tif")
invalid_raster_path = os.path.join(
os.path.dirname(__file__), "fixtures", "16-21560-29773_small.tif"
)
def test_TileServer_default():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.raster == r
assert app.port == 8080
assert not app.server
assert app.tiles_format == "png"
assert app.gl_tiles_size == 512
assert app.gl_tiles_minzoom == 0
assert app.gl_tiles_maxzoom == 22
def test_TileServer_1b():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(raster_ndvi_path)
app = TileServer(
r,
tiles_format="jpg",
gl_tiles_minzoom=13,
gl_tiles_maxzoom=19,
gl_tiles_size=256,
scale=((-1, 1),),
colormap="cfastie",
port=5000,
)
assert app.raster == r
assert app.port == 5000
assert not app.server
assert app.tiles_format == "jpg"
assert app.gl_tiles_size == 256
assert app.gl_tiles_minzoom == 13
assert app.gl_tiles_maxzoom == 19
def test_TileServer_options():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(raster_path)
app = TileServer(
r,
tiles_format="jpg",
gl_tiles_minzoom=13,
gl_tiles_maxzoom=19,
gl_tiles_size=256,
port=5000,
)
assert app.raster == r
assert app.port == 5000
assert not app.server
assert app.tiles_format == "jpg"
assert app.gl_tiles_size == 256
assert app.gl_tiles_minzoom == 13
assert app.gl_tiles_maxzoom == 19
def test_TileServer_raster_tilesize():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(raster_path, tiles_size=256)
app = TileServer(r)
assert app.raster == r
assert not app.server
assert app.tiles_format == "png"
assert app.gl_tiles_size == 256
def test_TileServer_raster_get_bounds():
"""Should work as expected."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.raster == r
assert app.get_bounds() == r.get_bounds()
def test_TileServer_raster_get_center():
"""Should work as expected."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.raster == r
assert app.get_center() == r.get_center()
def test_TileServer_get_tiles_url():
"""Should work as expected (create TileServer object and get tiles endpoint)."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.get_tiles_url() == "http://127.0.0.1:8080/tiles/{z}/{x}/{y}.png"
def test_TileServer_get_template_url():
"""Should work as expected (create TileServer object and get template url)."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.get_template_url() == "http://127.0.0.1:8080/index.html"
def test_TileServer_get_playground_url():
"""Should work as expected (create TileServer object and get playground url)."""
r = RasterTiles(raster_path)
app = TileServer(r)
assert app.get_playground_url() == "http://127.0.0.1:8080/playground.html"
class TestHandlers(AsyncHTTPTestCase):
"""Test tornado handlers."""
def get_app(self):
"""Initialize app."""
r = RasterTiles(raster_path)
return TileServer(r).app
def test_get_root(self):
"""Should return error on root query."""
response = self.fetch("/")
self.assertEqual(response.code, 404)
def test_tile(self):
"""Should return tile buffer."""
response = self.fetch("/tiles/18/86240/119094.png")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/png")
def test_tileColor(self):
"""Should apply color ops and return tile buffer."""
response = self.fetch("/tiles/18/86240/119094.png?color=gamma%20b%201.8")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/png")
def test_tileJpeg(self):
"""Should return tile jpeg buffer."""
response = self.fetch("/tiles/18/86240/119094.jpg")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/jpg")
def test_tileNotFound(self):
"""Should error with tile doesn't exits."""
response = self.fetch("/tiles/18/8624/119094.png")
self.assertEqual(response.code, 404)
def test_TemplateSimple(self):
"""Should find the template."""
response = self.fetch("/index.html")
self.assertEqual(response.code, 200)
def test_TemplatePlayground(self):
"""Should find the template."""
response = self.fetch("/playground.html")
self.assertEqual(response.code, 200)
class TestHandlersRescale(AsyncHTTPTestCase):
"""Test tornado handlers."""
def get_app(self):
"""Initialize app."""
r = RasterTiles(raster_path)
return TileServer(r, scale=((1, 240),)).app
def test_tile(self):
"""Should return tile buffer."""
response = self.fetch("/tiles/18/86240/119094.png")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/png")
class TestHandlers1B(AsyncHTTPTestCase):
"""Test tornado handlers."""
def get_app(self):
"""Initialize app."""
r = RasterTiles(raster_ndvi_path, tiles_size=32)
return TileServer(r, gl_tiles_size=32, scale=((-1, 1),), colormap="cfastie").app
def test_tile(self):
"""Should return tile buffer."""
response = self.fetch("/tiles/9/142/205.png")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/png")
class CustomRaster(RasterTiles):
"""Custom RasterTiles."""
def read_tile(self, z, x, y):
"""Read raster tile data and mask."""
mercator_tile = mercantile.Tile(x=x, y=y, z=z)
tile_bounds = mercantile.xy_bounds(mercator_tile)
data, mask = tile_read(
self.path,
tile_bounds,
self.tiles_size,
indexes=self.indexes,
nodata=self.nodata,
)
data = (data[0] + data[1]) / 2
return data.astype(numpy.uint8), mask
class TestHandlersCustom(AsyncHTTPTestCase):
"""Test tornado handlers."""
def get_app(self):
"""Initialize app."""
r = CustomRaster(raster_path)
return TileServer(r).app
def test_tile(self):
"""Should return tile buffer."""
response = self.fetch("/tiles/18/86240/119094.png")
self.assertEqual(response.code, 200)
self.assertTrue(response.buffer)
self.assertEqual(response.headers["Content-Type"], "image/png")
| 30.895833
| 88
| 0.652192
|
989f534bee589b32a416b6c9a14fed167336188d
| 7,958
|
py
|
Python
|
ml_workflow/viz_utils.py
|
BenjaminHabert/ml_workflow
|
266e503e895defe510d657a2a54ea19623f9e181
|
[
"MIT"
] | 1
|
2021-02-03T17:45:09.000Z
|
2021-02-03T17:45:09.000Z
|
ml_workflow/viz_utils.py
|
BenjaminHabert/ml_workflow
|
266e503e895defe510d657a2a54ea19623f9e181
|
[
"MIT"
] | 1
|
2021-01-25T15:56:44.000Z
|
2021-01-25T15:56:44.000Z
|
ml_workflow/viz_utils.py
|
BenjaminHabert/ml_workflow
|
266e503e895defe510d657a2a54ea19623f9e181
|
[
"MIT"
] | 1
|
2021-02-03T17:45:16.000Z
|
2021-02-03T17:45:16.000Z
|
import os
import sys
import pydot
import datetime as dt
import re
import IPython
from IPython.display import SVG, display
from . import viz_utils_node_detail_writer
from . import viz_utils_layer
from . import rule
class VizUtils:
rankdir = 'TB'
dpi = 96
SHAPE_BY_CLASS_NAME = {
'Rule' : 'rectangle',
'DataSource' : 'cylinder',
        'default': 'oval'
}
@staticmethod
def check_pydot():
"""Returns True if PyDot and Graphviz are available."""
if pydot is None:
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except (OSError, pydot.InvocationException):
return False
@staticmethod
def raise_error_if_no_pydot():
if not VizUtils.check_pydot():
            message = (
                'Failed to import pydot. You must `pip install pydot` '
                'and install graphviz (https://graphviz.gitlab.io/download/) '
                'for `pydotprint` to work.'
            )
if 'IPython.core.magics.namespace' in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
print(message)
return
else:
raise ImportError(message)
@staticmethod
def add_edge(dot, src, dst):
if not dot.get_edge(src, dst):
dot.add_edge(pydot.Edge(src, dst))
@staticmethod
def add_sub_label(layer):
node = layer if isinstance(layer, rule.Rule) else layer.get_leaf_origin()
if node is None:
return ''
sub_label = []
if node.has_version():
sub_label.append(f"version : {node.get_version()}")
if node.get_branch():
sub_label.append(f"branch : {node.get_branch()}")
if node.get_tags():
if sub_label:
sub_label.append('\n')
sub_label.append(f"tags : {node.get_tags()}")
if sub_label:
return f"\n{' '.join(sub_label)}"
return ''
@staticmethod
def get_label(layer):
label = str(layer)
label += VizUtils.add_sub_label(layer)
if hasattr(layer, 'outside_len'):
label += f"\nsize : {layer.outside_len}"
return label
@staticmethod
def get_shape(origin):
try:
return VizUtils.SHAPE_BY_CLASS_NAME[origin.__class__.__name__]
        except KeyError:
return VizUtils.SHAPE_BY_CLASS_NAME['default']
@staticmethod
def get_color(origin):
if hasattr(origin, 'highlight'):
if origin.highlight == 2:
return 'red'
elif origin.highlight == 1:
return 'green'
if origin.get_branch():
return 'blue'
return 'grey'
@staticmethod
def correct_weird_pydot_bug(filename):
with open(filename, 'r') as f:
content = f.read()
with open(filename, 'w') as f:
            # The \1 backreference forces the second number to repeat exactly the
            # same run of 3s captured by the first group (e.g. "scale(1.333 1.333) ").
            f.write(re.sub(r"scale\(1\.(3+) 1\.\1\) ", '', content))
def __init__(self, ts = None, expand_nested = True):
self.ts = ts if ts else dt.datetime.now().strftime('%Y%m%d_%H%M%S')
self.expand_nested = expand_nested
def model_to_dot(self, model):
self.raise_error_if_no_pydot()
dot = self.get_dot_graph(model)
main_layer = viz_utils_layer.convert_node_to_layer(model)
layers = main_layer.get_all_root_layers()
nodes = model.get_all_nodes()
self.check_coherence(model, main_layer)
self.create_nodes(layers, dot)
self.add_edges_in_dot(nodes, dot, main_layer.get_duplicated_id())
return dot
def check_coherence(_, model, main_layer):
assert((len(main_layer.get_all_nodes()) + len(main_layer.get_duplicated_id()))
== len(model.get_all_nodes()))
assert(set(main_layer.get_all_included_node_id())
== set(node.id for node in model.get_all_nodes()))
def add_edges_in_dot(self, nodes, dot, duplicated_id):
def get_str_id(node):
if node.id in duplicated_id:
return duplicated_id[node.id].get_str_id()
else:
return node.get_str_id()
for node in nodes:
node_id = get_str_id(node)
for inbound_node in node.previous:
inbound_node_id = get_str_id(inbound_node)
                # The source this is inspired by (tensorflow) checks that the incoming
                # node id is known; we skip that check here because a node may live
                # inside a subgraph.
self.add_edge(dot, inbound_node_id, node_id)
def get_dot_graph(self, model):
dot = pydot.Dot()
dot.set('rankdir', self.rankdir)
dot.set('concentrate', True)
dot.set('dpi', self.dpi)
dot.set_node_defaults(shape='record')
return dot
def create_nodes(self, layers, dot):
for layer in layers:
if len(layer.sub_layers):
origin = layer.layer_origin
cluster = pydot.Cluster(
style='dashed',
color=VizUtils.get_color(origin),
graph_name=str(origin),
label=VizUtils.get_label(origin)
)
dot.add_subgraph(cluster)
if layer.node is not None:
self.create_dot_node(cluster, layer)
self.create_nodes(layer.sub_layers, cluster)
else:
assert(layer.node is not None)
self.create_dot_node(dot, layer)
def create_dot_node(self, dot, layer):
origin = layer.layer_origin
node = pydot.Node(
layer.node.get_str_id(),
label=self.get_label(layer.node),
# Temporary for demo
URL=f"http://www.google.fr/?q={layer}",
shape=self.get_shape(origin),
color=self.get_color(origin)
)
dot.add_node(node)
def plot_model(self,
model,
to_file):
dot = self.model_to_dot(model)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
if extension == 'svg':
self.correct_weird_pydot_bug(to_file)
return SVG(to_file)
# svg is useless here, but kept for clarity
elif extension not in ('pdf', 'svg'):
try:
return IPython.display.Image(filename=to_file)
except ImportError:
pass
DEFAULT_DIR_NAME_PREFIX = 'ml_workflow_graph_detail'
def get_default_dirname(ts):
return f"{DEFAULT_DIR_NAME_PREFIX}_{ts.strftime('%Y%m%d_%H%M%S')}"
def plot_model_full_detail(model, directory = None, expand_nested=True, ts = None):
if ts is None:
ts = dt.datetime.now()
if directory is None:
directory = os.path.join(os.getcwd(), get_default_dirname(ts))
if not os.path.isdir(directory):
os.mkdir(directory)
to_file = os.path.join(directory, 'main_graph.svg')
viz_utils_node_detail_writer.write_detail_for_model(model, directory = directory)
return VizUtils(expand_nested=expand_nested, ts=ts).plot_model(model, to_file)
def plot_model(model, to_file='model.svg', expand_nested=True, ts=None):
"""Plot_model is creating an image of the model, representing the differents
steps, datasource, and so on"""
return VizUtils(expand_nested=expand_nested, ts=ts).plot_model(model, to_file)
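# Hedged usage sketch (editor's illustration; `my_model` stands for any object that
# exposes the get_all_nodes()/node API that VizUtils relies on above):
#   plot_model(my_model, to_file='model.svg')    # one SVG of the whole graph
#   plot_model_full_detail(my_model)             # SVG plus per-node detail files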
| 31.207843
| 87
| 0.584569
|
90bff9fab40d4e4eca5179f6e58107702d052112
| 11,177
|
py
|
Python
|
qnapstats/qnap_stats.py
|
M4v3r1cK87/python-qnapstats
|
9ff63a8353fa882a102d84efac1f9955de3391ed
|
[
"MIT"
] | null | null | null |
qnapstats/qnap_stats.py
|
M4v3r1cK87/python-qnapstats
|
9ff63a8353fa882a102d84efac1f9955de3391ed
|
[
"MIT"
] | null | null | null |
qnapstats/qnap_stats.py
|
M4v3r1cK87/python-qnapstats
|
9ff63a8353fa882a102d84efac1f9955de3391ed
|
[
"MIT"
] | null | null | null |
"""Module containing multiple classes to obtain QNAP system stats via cgi calls."""
# -*- coding:utf-8 -*-
import base64
import json
import xmltodict
import requests
# pylint: disable=too-many-instance-attributes
class QNAPStats:
"""Class containing the main functions."""
# pylint: disable=too-many-arguments
def __init__(self, host, port, username, password, debugmode=False, verify_ssl=True, timeout=5):
"""Instantiate a new qnap_stats object."""
self._username = username
self._password = base64.b64encode(password.encode('utf-8')).decode('ascii')
self._sid = None
self._debugmode = debugmode
self._session_error = False
self._session = None # type: requests.Session
if not (host.startswith("http://") or host.startswith("https://")):
host = "http://" + host
self._verify_ssl = verify_ssl
self._timeout = timeout
self._base_url = '%s:%s/cgi-bin/' % (host, port)
def _debuglog(self, message):
"""Output message if debug mode is enabled."""
if self._debugmode:
print("DEBUG: " + message)
def _init_session(self):
if self._sid is None or self._session is None or self._session_error:
# Clear sid and reset error
self._sid = None
self._session_error = False
if self._session is not None:
self._session = None
self._debuglog("Creating new session")
self._session = requests.Session()
# We created a new session so login
if self._login() is False:
self._session_error = True
self._debuglog("Login failed, unable to process request")
return
def _login(self):
"""Log into QNAP and obtain a session id."""
data = {"user": self._username, "pwd": self._password}
result = self._execute_post_url("authLogin.cgi", data, False)
if result is None:
return False
self._sid = result["authSid"]
return True
def _get_url(self, url, retry_on_error=True, **kwargs):
"""High-level function for making GET requests."""
self._init_session()
result = self._execute_get_url(url, **kwargs)
if (self._session_error or result is None) and retry_on_error:
self._debuglog("Error occured, retrying...")
self._get_url(url, False, **kwargs)
return result
def _execute_get_url(self, url, append_sid=True, **kwargs):
"""Low-level function to execute a GET request."""
url = self._base_url + url
self._debuglog("GET from URL: " + url)
if append_sid:
self._debuglog("Appending access_token (SID: " + self._sid + ") to url")
url = "%s&sid=%s" % (url, self._sid)
resp = self._session.get(url, timeout=self._timeout, verify=self._verify_ssl)
return self._handle_response(resp, **kwargs)
def _execute_post_url(self, url, data, append_sid=True, **kwargs):
"""Low-level function to execute a POST request."""
url = self._base_url + url
self._debuglog("POST to URL: " + url)
if append_sid:
self._debuglog("Appending access_token (SID: " + self._sid + ") to url")
data["sid"] = self._sid
resp = self._session.post(url, data, timeout=self._timeout, verify=self._verify_ssl)
return self._handle_response(resp, **kwargs)
def _handle_response(self, resp, force_list=None):
"""Ensure response is successful and return body as XML."""
self._debuglog("Request executed: " + str(resp.status_code))
if resp.status_code != 200:
return None
if resp.headers["Content-Type"] != "text/xml":
# JSON requests not currently supported
return None
self._debuglog("Headers: " + json.dumps(dict(resp.headers)))
self._debuglog("Cookies: " + json.dumps(dict(resp.cookies)))
self._debuglog("Response Text: " + resp.text)
data = xmltodict.parse(resp.content, force_list=force_list)['QDocRoot']
auth_passed = data['authPassed']
if auth_passed is not None and len(auth_passed) == 1 and auth_passed == "0":
self._session_error = True
return None
return data
def get_system_health(self):
"""Obtain the system's overall health."""
resp = self._get_url("management/manaRequest.cgi?subfunc=sysinfo&sysHealth=1")
if resp is None:
return None
status = resp["func"]["ownContent"]["sysHealth"]["status"]
if status is None or len(status) == 0:
return None
return status
def get_volumes(self):
"""Obtain information about volumes and shared directories."""
resp = self._get_url(
"management/chartReq.cgi?chart_func=disk_usage&disk_select=all&include=all",
force_list=("volume", "volumeUse", "folder_element")
)
if resp is None:
return None
if resp["volumeList"] is None or resp["volumeUseList"] is None:
return {}
volumes = {}
id_map = {}
for vol in resp["volumeList"]["volume"]:
key = vol["volumeValue"]
label = vol["volumeLabel"] if "volumeLabel" in vol else "Volume " + vol["volumeValue"]
volumes[label] = {
"id": key,
"label": label
}
id_map[key] = label
for vol in resp["volumeUseList"]["volumeUse"]:
id_number = vol["volumeValue"]
# Skip any system reserved volumes
if id_number not in id_map.keys():
continue
key = id_map[id_number]
volumes[key]["free_size"] = int(vol["free_size"])
volumes[key]["total_size"] = int(vol["total_size"])
folder_elements = vol["folder_element"]
if len(folder_elements) > 0:
volumes[key]["folders"] = []
for folder in folder_elements:
try:
sharename = folder["sharename"]
used_size = int(folder["used_size"])
volumes[key]["folders"].append({"sharename": sharename, "used_size": used_size})
except Exception as e:
print(e.args)
return volumes
def get_smart_disk_health(self):
"""Obtain SMART information about each disk."""
resp = self._get_url("disk/qsmart.cgi?func=all_hd_data", force_list=("entry"))
if resp is None:
return None
disks = {}
for disk in resp["Disk_Info"]["entry"]:
if disk["Model"]:
disks[disk["HDNo"]] = {
"drive_number": disk["HDNo"],
"health": disk["Health"],
"temp_c": int(disk["Temperature"]["oC"]) if disk["Temperature"]["oC"] is not None else None,
"temp_f": int(disk["Temperature"]["oF"]) if disk["Temperature"]["oF"] is not None else None,
"capacity": disk["Capacity"],
"model": disk["Model"],
"serial": disk["Serial"],
"type": "ssd" if ("hd_is_ssd" in disk and int(disk["hd_is_ssd"])) else "hdd",
}
return disks
def get_system_stats(self):
"""Obtain core system information and resource utilization."""
resp = self._get_url(
"management/manaRequest.cgi?subfunc=sysinfo&hd=no&multicpu=1",
force_list=("DNS_LIST")
)
if resp is None:
return None
root = resp["func"]["ownContent"]["root"]
details = {
"system": {
"name": root["server_name"],
"model": resp["model"]["displayModelName"],
"serial_number": root["serial_number"],
"temp_c": int(root["sys_tempc"]),
"temp_f": int(root["sys_tempf"]),
"timezone": root["timezone"],
},
"firmware": {
"version": resp["firmware"]["version"],
"build": resp["firmware"]["build"],
"patch": resp["firmware"]["patch"],
"build_time": resp["firmware"]["buildTime"],
},
"uptime": {
"days": int(root["uptime_day"]),
"hours": int(root["uptime_hour"]),
"minutes": int(root["uptime_min"]),
"seconds": int(root["uptime_sec"]),
},
"cpu": {
"model": root["cpu_model"] if "cpu_model" in root else None,
"usage_percent": float(root["cpu_usage"].replace("%", "")),
"temp_c": int(root["cpu_tempc"]) if "cpu_tempc" in root else None,
"temp_f": int(root["cpu_tempf"]) if "cpu_tempf" in root else None,
},
"memory": {
"total": float(root["total_memory"]),
"free": float(root["free_memory"]),
},
"nics": {},
"dns": [],
}
nic_count = int(root["nic_cnt"])
for nic_index in range(nic_count):
i = str(nic_index + 1)
interface = "eth" + str(nic_index)
status = root["eth_status" + i]
details["nics"][interface] = {
"link_status": "Up" if status == "1" else "Down",
"max_speed": int(root["eth_max_speed" + i]),
"ip": root["eth_ip" + i],
"mask": root["eth_mask" + i],
"mac": root["eth_mac" + i],
"usage": root["eth_usage" + i],
"rx_packets": int(root["rx_packet" + i]),
"tx_packets": int(root["tx_packet" + i]),
"err_packets": int(root["err_packet" + i])
}
for dns in root["dnsInfo"]["DNS_LIST"]:
details["dns"].append(dns)
return details
def get_bandwidth(self):
"""Obtain the current bandwidth usage speeds."""
resp = self._get_url(
"management/chartReq.cgi?chart_func=QSM40bandwidth",
force_list=("item")
)
if resp is None:
return None
details = {}
default = resp["bandwidth_info"]["df_gateway"]
for item in resp["bandwidth_info"]["item"]:
interface = item["id"]
details[interface] = {
"name": item["name"],
"rx": round(int(item["rx"]) / 5),
"tx": round(int(item["tx"]) / 5),
"is_default": interface == default
}
return details
def get_firmware_update(self):
"""Get firmware update version if available."""
resp = self._get_url("sys/sysRequest.cgi?subfunc=firm_update")
if resp is None:
return None
new_version = resp["func"]["ownContent"]["newVersion"]
if new_version is None or len(new_version) == 0:
return None
return new_version
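# Hedged usage sketch (illustration only; host, port and credentials are invented):
#   qnap = QNAPStats("192.168.1.10", 8080, "admin", "secret", verify_ssl=False)
#   print(qnap.get_system_stats())       # model, firmware, CPU/memory, NICs, DNS
#   print(qnap.get_smart_disk_health())  # per-disk SMART summary
#   print(qnap.get_volumes())            # volume sizes and shared-folder usage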
| 35.595541
| 112
| 0.539501
|
943f320206f0b42d413805507cd6d51639f88941
| 2,145
|
py
|
Python
|
src/Legacy/fp_train.py
|
snakepowerpoint/wifi_predictor
|
91b20e4ec53caae274134868763c74dcadef3dc4
|
[
"Apache-2.0"
] | 2
|
2017-10-02T07:21:40.000Z
|
2018-01-16T09:54:37.000Z
|
src/Legacy/fp_train.py
|
snakepowerpoint/wifi_predictor
|
91b20e4ec53caae274134868763c74dcadef3dc4
|
[
"Apache-2.0"
] | null | null | null |
src/Legacy/fp_train.py
|
snakepowerpoint/wifi_predictor
|
91b20e4ec53caae274134868763c74dcadef3dc4
|
[
"Apache-2.0"
] | 1
|
2018-07-12T08:04:17.000Z
|
2018-07-12T08:04:17.000Z
|
import pandas as pd
import sklearn as skl
from sklearn import svm
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import cluster
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import Isomap
import matplotlib.pyplot as plt
h5data = pd.HDFStore('../data/raw_data.h5')
raw_data= h5data["raw_data"]
label = raw_data["label"]
data = raw_data.drop(["label"], axis=1)
np_data = data.as_matrix()
np_label = label.as_matrix()
x_train, x_test, y_train, y_test = train_test_split(np_data, np_label, test_size=0.2, random_state=42)
"""
#SVM model
svc_model = svm.SVC(gamma=0.001, C=100., kernel='linear')
svc_model.fit(x_train, y_train)
prediction = svc_model.predict(x_test)
"""
# Grid search for parameters
parameter_candidates = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
clf = GridSearchCV(estimator=svm.SVC(), param_grid=parameter_candidates, n_jobs=-1)
clf.fit(x_train, y_train)
prediction = clf.predict(x_test)
print('Best score for training data:', clf.best_score_)
print('Best `C`:',clf.best_estimator_.C)
print('Best kernel:',clf.best_estimator_.kernel)
print('Best `gamma`:',clf.best_estimator_.gamma)
#clf = cluster.KMeans(init='k-means++', n_clusters=5, random_state=42)
#clf.fit(x_train)
#prediction = clf.fit_predict(x_train)
fig, ax = plt.subplots(2, 2, figsize=(8, 4))
print("Start Down Scale")
from sklearn.decomposition import PCA
train_iso = PCA(n_components=2).fit_transform(x_train)
test_iso = PCA(n_components=2).fit_transform(x_test)
ax[0][0].scatter(train_iso[:, 0], train_iso[:, 1], c=y_train)
ax[0][0].set_title('Predicted Training Labels')
ax[1][0].scatter(test_iso[:, 0], test_iso[:, 1], c=y_test)
ax[1][0].set_title('Actual Test Labels')
ax[1][1].scatter(test_iso[:, 0], test_iso[:, 1], c=prediction)
ax[1][1].set_title('Prediction Test Labels')
plt.show()
# Evaluation
#accuracy = np.sum(np.equal(prediction, y_test))/len(prediction)
#print(confusion_matrix(y_test, prediction, labels=["0","1","2","3", "4"]))
| 26.8125
| 102
| 0.725408
|
f7abf9d31cd1242e90244641e8cdac3e6178df74
| 9,833
|
py
|
Python
|
examples/reuploading_classifier/qlassifier.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 81
|
2020-09-04T10:54:40.000Z
|
2021-05-17T13:20:38.000Z
|
examples/reuploading_classifier/qlassifier.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 201
|
2020-08-24T08:41:33.000Z
|
2021-05-18T12:23:19.000Z
|
examples/reuploading_classifier/qlassifier.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 20
|
2021-06-11T18:13:09.000Z
|
2022-03-28T07:32:09.000Z
|
import numpy as np
from qibo.models import Circuit
from qibo import gates, K
from datasets import create_dataset, create_target, fig_template, world_map_template
from matplotlib.cm import get_cmap
from matplotlib.colors import Normalize
import os
class single_qubit_classifier:
def __init__(self, name, layers, grid=11, test_samples=1000, seed=0):
"""Class with all computations needed for classification.
Args:
name (str): Name of the problem to create the dataset, to choose between
['circle', '3 circles', 'square', '4 squares', 'crown', 'tricrown', 'wavy lines'].
layers (int): Number of layers to use in the classifier.
grid (int): Number of points in one direction defining the grid of points.
If not specified, the dataset does not follow a regular grid.
            test_samples (int): Number of points in the test set, randomly located.
seed (int): Random seed.
Returns:
Dataset for the given problem (x, y).
"""
np.random.seed(seed)
self.name = name
self.layers = layers
self.training_set = create_dataset(name, grid=grid)
self.test_set = create_dataset(name, samples=test_samples)
self.target = create_target(name)
self.params = np.random.randn(layers * 4)
self._circuit = self._initialize_circuit()
        os.makedirs('results/' + self.name + '/%s_layers' % self.layers, exist_ok=True)
def set_parameters(self, new_params):
"""Method for updating parameters of the class.
Args:
new_params (array): New parameters to update
"""
self.params = new_params
def _initialize_circuit(self):
"""Creates variational circuit."""
C = Circuit(1)
for l in range(self.layers):
C.add(gates.RY(0, theta=0))
C.add(gates.RZ(0, theta=0))
return C
def circuit(self, x):
"""Method creating the circuit for a point (in the datasets).
Args:
x (array): Point to create the circuit.
Returns:
Qibo circuit.
"""
params = []
for i in range(0, 4 * self.layers, 4):
params.append(self.params[i] * x[0] + self.params[i + 1])
params.append(self.params[i + 2] * x[1] + self.params[i + 3])
self._circuit.set_parameters(params)
return self._circuit
def cost_function_one_point_fidelity(self, x, y):
"""Method for computing the cost function for
a given sample (in the datasets), using fidelity.
Args:
x (array): Point to create the circuit.
y (int): label of x.
Returns:
float with the cost function.
"""
C = self.circuit(x)
state = C.execute()
cf = .5 * (1 - fidelity(state, self.target[y])) ** 2
return cf
def cost_function_fidelity(self, params=None):
"""Method for computing the cost function for the training set, using fidelity.
Args:
params(array): new parameters to update before computing
Returns:
float with the cost function.
"""
if params is None:
params = self.params
self.set_parameters(params)
cf = 0
for x, y in zip(self.training_set[0], self.training_set[1]):
cf += self.cost_function_one_point_fidelity(x, y)
cf /= len(self.training_set[0])
return cf
def minimize(self, method='BFGS', options=None, compile=True):
loss = self.cost_function_fidelity
if method == 'cma':
# Genetic optimizer
import cma
r = cma.fmin2(lambda p: K.to_numpy(loss(p)), self.params, 2)
result = r[1].result.fbest
parameters = r[1].result.xbest
elif method == 'sgd':
circuit = self.circuit(self.training_set[0])
for gate in circuit.queue:
if not K.supports_gradients:
from qibo.config import raise_error
raise_error(RuntimeError,
'Use tensorflow backend in order to compute gradients.')
sgd_options = {"nepochs": 5001,
"nmessage": 1000,
"optimizer": "Adamax",
"learning_rate": 0.5}
if options is not None:
sgd_options.update(options)
# proceed with the training
vparams = K.Variable(self.params)
optimizer = getattr(K.optimizers, sgd_options["optimizer"])(
learning_rate=sgd_options["learning_rate"])
def opt_step():
with K.GradientTape() as tape:
l = loss(vparams)
grads = tape.gradient(l, [vparams])
optimizer.apply_gradients(zip(grads, [vparams]))
return l, vparams
if compile:
opt_step = K.function(opt_step)
l_optimal, params_optimal = 10, self.params
for e in range(sgd_options["nepochs"]):
l, vparams = opt_step()
if l < l_optimal:
l_optimal, params_optimal = l, vparams
if e % sgd_options["nmessage"] == 0:
print('ite %d : loss %f' % (e, K.to_numpy(l)))
            result = K.to_numpy(self.cost_function_fidelity(params_optimal))
parameters = K.to_numpy(params_optimal)
else:
import numpy as np
from scipy.optimize import minimize
m = minimize(lambda p: K.to_numpy(loss(p)), self.params,
method=method, options=options)
result = m.fun
parameters = m.x
return result, parameters
def eval_test_set_fidelity(self):
"""Method for evaluating points in the training set, using fidelity.
Returns:
list of guesses.
"""
labels = [[0]] * len(self.test_set[0])
for j, x in enumerate(self.test_set[0]):
C = self.circuit(x)
state = C.execute()
fids = np.empty(len(self.target))
for i, t in enumerate(self.target):
fids[i] = fidelity(state, t)
labels[j] = np.argmax(fids)
return labels
def paint_results(self):
"""Method for plotting the guessed labels and the right guesses.
Returns:
plot with results.
"""
fig, axs = fig_template(self.name)
guess_labels = self.eval_test_set_fidelity()
colors_classes = get_cmap('tab10')
norm_class = Normalize(vmin=0, vmax=10)
x = self.test_set[0]
x_0, x_1 = x[:, 0], x[:, 1]
axs[0].scatter(x_0, x_1, c=guess_labels, s=2,
cmap=colors_classes, norm=norm_class)
colors_rightwrong = get_cmap('RdYlGn')
norm_rightwrong = Normalize(vmin=-.1, vmax=1.1)
checks = [int(g == l) for g, l in zip(guess_labels, self.test_set[1])]
axs[1].scatter(x_0, x_1, c=checks, s=2,
cmap=colors_rightwrong, norm=norm_rightwrong)
print('The accuracy for this classification is %.2f' %
(100 * np.sum(checks) / len(checks)), '%')
fig.savefig('results/'+self.name +
'/%s_layers/test_set.pdf' % self.layers)
def paint_world_map(self):
"""Method for plotting the proper labels on the Bloch sphere.
Returns:
plot with 2D representation of Bloch sphere.
"""
angles = np.zeros((len(self.test_set[0]), 2))
from datasets import laea_x, laea_y
fig, ax = world_map_template()
colors_classes = get_cmap('tab10')
norm_class = Normalize(vmin=0, vmax=10)
for i, x in enumerate(self.test_set[0]):
C = self.circuit(x)
state = C.execute()
angles[i, 0] = np.pi / 2 - \
np.arccos(np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2)
angles[i, 1] = np.angle(state[1] / state[0])
ax.scatter(laea_x(angles[:, 1], angles[:, 0]), laea_y(angles[:, 1], angles[:, 0]), c=self.test_set[1],
cmap=colors_classes, s=15, norm=norm_class)
if len(self.target) == 2:
angles_0 = np.zeros(len(self.target))
angles_1 = np.zeros(len(self.target))
angles_0[0] = np.pi / 2
angles_0[1] = -np.pi / 2
col = list(range(2))
elif len(self.target) == 3:
angles_0 = np.zeros(len(self.target) + 1)
angles_1 = np.zeros(len(self.target) + 1)
angles_0[0] = np.pi / 2
angles_0[1] = -np.pi / 6
angles_0[2] = -np.pi / 6
angles_0[3] = -np.pi / 6
angles_1[2] = np.pi
angles_1[3] = -np.pi
col = list(range(3)) + [2]
else:
angles_0 = np.zeros(len(self.target))
angles_1 = np.zeros(len(self.target))
for i, state in enumerate(self.target):
angles_0[i] = np.pi / 2 - \
np.arccos(np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2)
angles_1[i] = np.angle(state[1] / state[0])
col = list(range(len(self.target)))
ax.scatter(laea_x(angles_1, angles_0), laea_y(angles_1, angles_0), c=col,
cmap=colors_classes, s=500, norm=norm_class, marker='P', zorder=11)
ax.axis('off')
fig.savefig('results/'+self.name +
'/%s_layers/world_map.pdf' % self.layers)
def fidelity(state1, state2):
return K.abs(K.sum(K.qnp.conj(state2) * state1)) ** 2
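# Hedged usage sketch (editor's illustration of how the class above might be driven;
# 'circle' is one of the problem names listed in the constructor docstring):
#   ql = single_qubit_classifier('circle', layers=3)
#   best_loss, best_params = ql.minimize(method='BFGS')
#   ql.set_parameters(best_params)
#   ql.paint_results()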
| 36.418519
| 110
| 0.549171
|
9d9feab8cfe65db70c30024eace23bfdc6778936
| 159
|
py
|
Python
|
awsbeamline/__meta__.py
|
maheshda-aws/aws-beamline
|
e8505924ae698435c0e0a6ebbd8ed2f2bbd5b674
|
[
"MIT"
] | 1
|
2020-10-21T19:14:59.000Z
|
2020-10-21T19:14:59.000Z
|
awsbeamline/__meta__.py
|
maheshda-aws/aws-beamline
|
e8505924ae698435c0e0a6ebbd8ed2f2bbd5b674
|
[
"MIT"
] | null | null | null |
awsbeamline/__meta__.py
|
maheshda-aws/aws-beamline
|
e8505924ae698435c0e0a6ebbd8ed2f2bbd5b674
|
[
"MIT"
] | null | null | null |
__packageName__ = "awsbeamline"
__packageVersion__ = "0.0.1"
__license__ = "Apache License 2.0"
__description__ = "Utility to orchestrate ETL using AWS Batch."
| 39.75
| 63
| 0.779874
|
b7bb6cf212589be5f14c942f951322653058198d
| 1,833
|
py
|
Python
|
seamless/core/protocol/validate_subcelltype.py
|
sjdv1982/seamless
|
1b814341e74a56333c163f10e6f6ceab508b7df9
|
[
"MIT"
] | 15
|
2017-06-07T12:49:12.000Z
|
2020-07-25T18:06:04.000Z
|
seamless/core/protocol/validate_subcelltype.py
|
sjdv1982/seamless
|
1b814341e74a56333c163f10e6f6ceab508b7df9
|
[
"MIT"
] | 110
|
2016-06-21T23:20:44.000Z
|
2022-02-24T16:15:22.000Z
|
seamless/core/protocol/validate_subcelltype.py
|
sjdv1982/seamless
|
1b814341e74a56333c163f10e6f6ceab508b7df9
|
[
"MIT"
] | 6
|
2016-06-21T11:19:22.000Z
|
2019-01-21T13:45:39.000Z
|
"""
- sets of (checksum, celltype, subcelltype)
Means that the value (deserialized from the buffer with the checksum using
celltype) validates against subcelltype.
Meaningful values of (celltype, subcelltype):
("python", "transformer"/"reactor"/"macro").
"""
import ast
import json
validation_cache = set()
async def validate_subcelltype(checksum, celltype, subcelltype, codename):
if celltype != "python":
if celltype == "plain" and subcelltype == "module":
pass
else:
return
if codename is None:
codename = "<Unknown>"
key = (checksum, celltype, subcelltype)
if key in validation_cache:
return
try:
buffer = get_buffer(checksum)
except CacheMissError:
return # TODO: for now, tolerate cache misses. In the future, try to get validation cache remotely
value = buffer.decode()
if celltype == "plain" and subcelltype == "module":
v = json.loads(value)
"""
if not v.get("dependencies"):
build_module(v, module_error_name=None, ...)
""" # pointless; why validate some modules but not all,
# and anyway, the result may depend on compilers/languages
else:
tree = ast.parse(value, filename=codename)
# cached_compile(value, codename) # pointless; syntax error is not caught
if subcelltype in ("reactor", "macro"):
mode, _ = analyze_code(value, codename)
if mode in ("expression", "lambda"):
err = "subcelltype '%s' does not support code mode '%s'" % (subcelltype, mode)
raise SyntaxError((codename, err))
validation_cache.add(key)
from .get_buffer import get_buffer, CacheMissError
from ..cached_compile import analyze_code, cached_compile
from ..build_module import build_module
| 35.25
| 106
| 0.649755
|
b99761d0cb84faecc9b3c9a950bfbd025716e7de
| 11,391
|
py
|
Python
|
tests/seleniumwire/proxy/test_storage.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 5
|
2020-03-11T06:08:08.000Z
|
2021-09-22T02:46:51.000Z
|
tests/seleniumwire/proxy/test_storage.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 3
|
2020-08-18T09:35:55.000Z
|
2021-11-15T17:49:55.000Z
|
tests/seleniumwire/proxy/test_storage.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 1
|
2021-03-04T08:39:21.000Z
|
2021-03-04T08:39:21.000Z
|
from datetime import datetime, timedelta
from fnmatch import fnmatch
import glob
import gzip
from http.client import HTTPMessage
from io import BytesIO
import os
import pickle
import shutil
from unittest import TestCase
from unittest.mock import Mock
from seleniumwire.proxy.storage import RequestStorage
class RequestStorageTest(TestCase):
def test_initialise(self):
RequestStorage(base_dir=self.base_dir)
storage_dir = glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*'))
self.assertEqual(len(storage_dir), 1)
def test_cleanup_removes_storage(self):
storage = RequestStorage(base_dir=self.base_dir)
storage.cleanup()
# The 'seleniumwire' parent folder should have been cleaned up
# when there is nothing left inside of it.
self.assertFalse(os.listdir(self.base_dir))
def test_cleanup_does_not_remove_parent_folder(self):
# There is an existing storage folder
os.makedirs(os.path.join(self.base_dir, '.seleniumwire', 'teststorage'))
storage = RequestStorage(base_dir=self.base_dir)
storage.cleanup()
# The existing storage folder is not cleaned up
self.assertEqual(len(os.listdir(self.base_dir)), 1)
self.assertTrue(os.path.exists(os.path.join(self.base_dir, '.seleniumwire', 'teststorage')))
def test_initialise_clears_old_folders(self):
old_dir = os.path.join(self.base_dir, '.seleniumwire', 'storage-test1')
new_dir = os.path.join(self.base_dir, '.seleniumwire', 'storage-test2')
os.makedirs(old_dir)
os.makedirs(new_dir)
two_days_ago = (datetime.now() - timedelta(days=2)).timestamp()
os.utime(old_dir, times=(two_days_ago, two_days_ago))
RequestStorage(base_dir=self.base_dir)
self.assertFalse(os.path.exists(old_dir))
self.assertTrue(os.path.exists(new_dir))
def test_save_request(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
request_file_path = self._get_stored_path(request_id, 'request')
with open(request_file_path[0], 'rb') as loaded:
loaded_request = pickle.load(loaded)
self.assertEqual(loaded_request['id'], request_id)
self.assertEqual(loaded_request['path'], 'http://www.example.com/test/path/')
self.assertEqual(loaded_request['method'], 'GET')
self.assertEqual(loaded_request['headers'], {
'Host': 'www.example.com',
'Accept': '*/*'
})
self.assertIsNone(loaded_request['response'])
def test_save_request_with_body(self):
mock_request = self._create_mock_request()
request_body = b'test request body'
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=request_body)
request_body_path = self._get_stored_path(request_id, 'requestbody')
with open(request_body_path[0], 'rb') as loaded:
loaded_body = pickle.load(loaded)
self.assertEqual(loaded_body, b'test request body')
def test_save_response(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response)
response_file_path = self._get_stored_path(request_id, 'response')
with open(response_file_path[0], 'rb') as loaded:
loaded_response = pickle.load(loaded)
self.assertEqual(loaded_response['status_code'], 200)
self.assertEqual(loaded_response['reason'], 'OK')
self.assertEqual(loaded_response['headers'], {
'Content-Type': 'application/json',
'Content-Length': '500'
})
def test_save_response_with_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
response_body = b'some response body'
storage.save_response(request_id, mock_response, response_body=response_body)
response_body_path = self._get_stored_path(request_id, 'responsebody')
with open(response_body_path[0], 'rb') as loaded:
loaded_body = pickle.load(loaded)
self.assertEqual(loaded_body, b'some response body')
def test_save_response_no_request(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.clear_requests()
storage.save_response(request_id, mock_response)
response_file_path = self._get_stored_path(request_id, 'response')
self.assertFalse(response_file_path)
def test_load_requests(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id1 = storage.save_request(mock_request_1)
request_id2 = storage.save_request(mock_request_2)
requests = storage.load_requests()
self.assertEqual(len(requests), 2)
self.assertEqual(requests[0]['id'], request_id1)
self.assertEqual(requests[1]['id'], request_id2)
self.assertIsNone(requests[0]['response'])
self.assertIsNone(requests[1]['response'])
def test_load_response(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response)
requests = storage.load_requests()
self.assertIsNotNone(requests[0]['response'])
def test_load_request_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
request_body = storage.load_request_body(request_id)
self.assertEqual(request_body, b'test request body')
def test_load_response_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response, response_body=b'test response body')
response_body = storage.load_response_body(request_id)
self.assertEqual(response_body, b'test response body')
def test_load_response_body_encoded(self):
io = BytesIO()
with gzip.GzipFile(fileobj=io, mode='wb') as f:
f.write(b'test response body')
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
mock_response = self._create_mock_resonse()
mock_response.headers['Content-Encoding'] = 'gzip'
storage.save_response(request_id, mock_response, response_body=io.getvalue())
response_body = storage.load_response_body(request_id)
self.assertEqual(response_body, b'test response body')
def test_load_last_request(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
storage.save_request(mock_request_1)
request_id2 = storage.save_request(mock_request_2)
last_request = storage.load_last_request()
self.assertEqual(last_request['id'], request_id2)
def test_load_last_request_none(self):
storage = RequestStorage(base_dir=self.base_dir)
last_request = storage.load_last_request()
self.assertIsNone(last_request)
def test_clear_requests(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
storage.save_request(mock_request_1)
storage.save_request(mock_request_2)
storage.clear_requests()
requests = storage.load_requests()
self.assertFalse(requests)
self.assertFalse(glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*', '*')))
def test_get_cert_dir(self):
storage = RequestStorage(base_dir=self.base_dir)
self.assertTrue(fnmatch(storage.get_cert_dir(),
os.path.join(self.base_dir, '.seleniumwire', 'storage-*', 'certs')))
def test_find(self):
mock_request_1 = self._create_mock_request('http://www.example.com/test/path/?foo=bar')
mock_request_2 = self._create_mock_request('http://www.stackoverflow.com/other/path/?x=y')
mock_response = self._create_mock_resonse()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request_1)
storage.save_response(request_id, mock_response)
storage.save_request(mock_request_2)
self.assertEqual(storage.find('/test/path/')['id'], request_id)
self.assertEqual(storage.find('/test/path/?foo=bar')['id'], request_id)
self.assertEqual(storage.find('http://www.example.com/test/path/?foo=bar')['id'], request_id)
self.assertEqual(storage.find('http://www.example.com/test/path/')['id'], request_id)
self.assertIsNone(storage.find('/different/path'))
self.assertIsNone(storage.find('/test/path/?x=y'))
self.assertIsNone(storage.find('http://www.example.com/different/path/?foo=bar'))
self.assertIsNone(storage.find('http://www.different.com/test/path/?foo=bar'))
self.assertIsNone(storage.find('http://www.example.com/test/path/?x=y'))
def _get_stored_path(self, request_id, filename):
return glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*',
'request-{}'.format(request_id), filename))
def _create_mock_request(self, path='http://www.example.com/test/path/'):
mock_request = Mock()
mock_request.path = path
mock_request.command = 'GET'
headers = HTTPMessage()
headers.add_header('Host', 'www.example.com')
headers.add_header('Accept', '*/*')
mock_request.headers = headers
return mock_request
def _create_mock_resonse(self):
mock_response = Mock()
mock_response.status = 200
mock_response.reason = 'OK'
headers = HTTPMessage()
headers.add_header('Content-Type', 'application/json')
headers.add_header('Content-Length', '500')
mock_response.headers = headers
return mock_response
def setUp(self):
self.base_dir = os.path.join(os.path.dirname(__file__), 'data')
def tearDown(self):
shutil.rmtree(os.path.join(self.base_dir), ignore_errors=True)
| 39.968421
| 101
| 0.689141
|
98d2144a32d3d785b9afef57484fedb1bf3c1a79
| 775
|
py
|
Python
|
webstore/carts/serializers.py
|
dmusial98/WebStorePython
|
ed98764a40dd82db2b57e030ff9bf0bc777075a7
|
[
"Unlicense"
] | null | null | null |
webstore/carts/serializers.py
|
dmusial98/WebStorePython
|
ed98764a40dd82db2b57e030ff9bf0bc777075a7
|
[
"Unlicense"
] | null | null | null |
webstore/carts/serializers.py
|
dmusial98/WebStorePython
|
ed98764a40dd82db2b57e030ff9bf0bc777075a7
|
[
"Unlicense"
] | null | null | null |
from .models import Cart, CartProduct
from rest_framework import serializers
class CartProductSerializer(serializers.HyperlinkedModelSerializer):
cartId = serializers.PrimaryKeyRelatedField(queryset=Cart.objects.all(),source='cart.id')
class Meta:
model = CartProduct
fields = ('id','productId', 'count', 'cartId')
def create(self, validated_data):
subject = CartProduct.objects.create(cart=validated_data['cart']['id'], productId=validated_data['productId'], count=validated_data['count'])
return subject
class CartSerializer(serializers.HyperlinkedModelSerializer):
products = CartProductSerializer(many=True, read_only=True)
class Meta:
model = Cart
fields = ('userId','id','products')
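# Hedged usage sketch (illustration only; `cart` stands for a hypothetical Cart instance):
#   CartSerializer(cart).data  # -> {'userId': ..., 'id': ..., 'products': [...]}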
| 29.807692
| 149
| 0.712258
|
91650ec09c31ee775bca50797ec710c767edfe48
| 1,047
|
py
|
Python
|
backend/breach/migrations/0008_auto_20160314_2049.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 184
|
2016-03-31T04:19:42.000Z
|
2021-11-26T21:37:12.000Z
|
backend/breach/migrations/0008_auto_20160314_2049.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 212
|
2016-03-31T04:32:06.000Z
|
2017-02-26T09:34:47.000Z
|
backend/breach/migrations/0008_auto_20160314_2049.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 38
|
2016-03-31T09:09:44.000Z
|
2021-11-26T21:37:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-14 20:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0007_auto_20160309_1802'),
]
operations = [
migrations.AlterField(
model_name='round',
name='amount',
field=models.IntegerField(default=1, help_text='Number of samples contained in each sampleset of this round.'),
),
migrations.AlterField(
model_name='sampleset',
name='completed',
field=models.DateTimeField(blank=True, default=None, help_text='When we stopped collecting samples for this sampleset, successfully or not', null=True),
),
migrations.AlterField(
model_name='sampleset',
name='started',
field=models.DateTimeField(blank=True, default=None, help_text='Date and time at which sample set collection was started', null=True),
),
]
| 33.774194
| 164
| 0.637058
|
6172143f9fa3694b628ac83bdb7d2e47dca9e200
| 4,794
|
py
|
Python
|
spotify/playback_history.py
|
wijtserekker/EFMSX
|
f994ef674ab0273c9f7826002c2c7a405d3e3205
|
[
"MIT"
] | null | null | null |
spotify/playback_history.py
|
wijtserekker/EFMSX
|
f994ef674ab0273c9f7826002c2c7a405d3e3205
|
[
"MIT"
] | null | null | null |
spotify/playback_history.py
|
wijtserekker/EFMSX
|
f994ef674ab0273c9f7826002c2c7a405d3e3205
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import json
import datetime
import numpy as np
import seaborn as sns
FILENAME = 'json/fixed_history.json'
TIMESTAMP = 'ts'
ARTIST_NAME = 'master_metadata_album_artist_name'
ARTIST_THRESHOLD = 200
DAY_RANGE = (1, 366) # Normally (1, 366)
MON_RANGE = (1, 13) # Normally (1, 13)
YEAR = 2018
TS_FORMAT = '%Y-%m-%d %H:%M:%S'
TIMESCALE = 1
with open(FILENAME) as file:
c_data = json.load(file)
data = c_data['data']
x_days = np.array([x for x in range(DAY_RANGE[0], DAY_RANGE[1])])
x_mons = np.array([x for x in range(MON_RANGE[0], MON_RANGE[1])])
artists_dict_days = {}
artists_dict_mons = {}
y_artists = []
y_labels = []
for field in data:
if ARTIST_NAME in field.keys():
artist_name = field[ARTIST_NAME]
timestamp = field[TIMESTAMP][:-4]
dt = datetime.datetime.strptime(timestamp, TS_FORMAT)
tt = dt.timetuple()
year = tt.tm_year
day = tt.tm_yday
mon = tt.tm_mon
if year == YEAR:
if artist_name not in artists_dict_days.keys():
if day in range(DAY_RANGE[0], DAY_RANGE[1]):
artists_dict_days[artist_name] = np.zeros((DAY_RANGE[1] - 1,), dtype=int)
if mon in range(MON_RANGE[0], MON_RANGE[1]):
artists_dict_mons[artist_name] = np.zeros((MON_RANGE[1] - 1,), dtype=int)
print(artist_name)
artists_dict_days[artist_name][day - 1] += 1
artists_dict_mons[artist_name][mon - 1] += 1
if TIMESCALE == 0:
for artist_name in artists_dict_days.keys():
if sum(artists_dict_days[artist_name]) > ARTIST_THRESHOLD:
y_artists.append(artists_dict_days[artist_name])
y_labels.append(artist_name)
if TIMESCALE == 1:
for artist_name in artists_dict_mons.keys():
if sum(artists_dict_days[artist_name]) > ARTIST_THRESHOLD:
y_artists.append(artists_dict_mons[artist_name])
y_labels.append(artist_name)
y_artists_np = np.vstack(y_artists)
# y_percent = y_artists_np
y_percent = y_artists_np / y_artists_np.sum(axis=0).astype(float) * 100
fig_size = plt.rcParams['figure.figsize']
fig_size[0] = 3
fig_size[1] = 2
plt.rcParams['figure.figsize'] = fig_size
plt.rcParams['figure.dpi'] = 300
# plt.figure(figsize=(16, 9), dpi=200)
sns.set_palette(sns.color_palette('hls', len(y_labels)))
fig, ax = plt.subplots()
if TIMESCALE == 0:
ax.stackplot(x_days, y_percent, labels=y_labels)
if TIMESCALE == 1:
ax.stackplot(x_mons, y_percent, labels=y_labels)
chart_box = ax.get_position()
# ax.set_position([chart_box.x0, chart_box.y0, chart_box.x1*0.1, chart_box.y1])
ax.legend(loc='upper center', bbox_to_anchor=(1.23, 1), prop={'size': 3})
plt.show()
# print(artists_dict)
# "ts":"2012-09-23 18:56:13 UTC", TIMESTAMP
# "username":"1147860656", USERNAME
# "platform":"iOS 6.0 (iPod4,1)", PLATFORM
# "ms_played":"4970", PLAY TIME ms
# "conn_country":"NL", COUNTRY
# "ip_addr_decrypted":"94.215.158.175", IP ADDRESS
# "user_agent_decrypted":"unknown", USER AGENT
# "master_metadata_track_name":"Breakeven - Live At The Aviva Stadium, Dublin", TRACK NAME
# "master_metadata_album_artist_name":"The Script", ARTIST NAME
# "master_metadata_album_album_name":"#3 Deluxe Version", ALBUM NAME
# "reason_start":"fwdbtn", START REASON
# "reason_end":"fwdbtn", END REASON
# "shuffle":false, SHUFFLE true/false
# "skipped":true, SKIPPED true/false
# "offline":false, OFFLINE true/false
# "offline_timestamp":"0", OFFLINE TS
# "incognito_mode":false, INCOGNITO MODE true/false
# "metro_code":"0", METRO CODE
# "longitude":0, LONGITUDE
# "latitude":0 LATITUDE
| 37.748031
| 113
| 0.516062
|
9656f5051d65b2f6b53c4a67f23360e46fddfa30
| 15,135
|
py
|
Python
|
tests/chainer_tests/testing_tests/test_function.py
|
Qwinpin/chainer
|
1dca01bc8a1aceec6ee53a66d24970b203a9fc51
|
[
"MIT"
] | 1
|
2019-02-12T23:10:16.000Z
|
2019-02-12T23:10:16.000Z
|
tests/chainer_tests/testing_tests/test_function.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 1
|
2018-06-26T08:16:09.000Z
|
2018-06-26T08:16:09.000Z
|
tests/chainer_tests/testing_tests/test_function.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 1
|
2018-05-28T22:43:34.000Z
|
2018-05-28T22:43:34.000Z
|
import unittest
import numpy
import pytest
import chainer
from chainer import testing
from chainer import utils
import chainerx
_inject_backend_tests = testing.inject_backend_tests(
None,
[
# CPU tests
{},
{'use_ideep': 'always'},
# GPU tests
{'use_cuda': True},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX tests
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
def _forward_correct(x1, x2):
dt = x1.dtype.type
y1 = (x1 + x2) ** dt(2)
y2 = (x1 ** dt(2)) * (x2 ** dt(2))
return utils.force_array(y1), utils.force_array(y2)
def _backward_correct(x1, x2, gy1, gy2):
dt = x1.dtype.type
ggx1 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 * x2 ** dt(2))
ggx2 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 ** dt(2) * x2)
return ggx1, ggx2
def _double_backward_correct(x1, x2, gy1, gy2, ggx1, ggx2):
dt = x1.dtype.type
ggy1 = (ggx1 + ggx2) * dt(2) * (x1 + x2)
ggy2 = (ggx1 * x2 + ggx2 * x1) * dt(2) * x1 * x2
gx1 = (
+ ggx1 * (dt(2) * gy1 + dt(2) * x2 ** dt(2) * gy2)
+ ggx2 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2))
gx2 = (
+ ggx1 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2)
+ ggx2 * (dt(2) * gy1 + dt(2) * x1 ** dt(2) * gy2))
return gx1, gx2, ggy1, ggy2
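# Hedged sketch (added; not part of the original test file): a small
# finite-difference check that _backward_correct matches the analytic gradient
# of _forward_correct. Shape, seed and tolerance below are illustrative only.
def _numerical_backward_check(eps=1e-6, shape=(3, 2), seed=0):
    rng = numpy.random.RandomState(seed)
    x1, x2, gy1, gy2 = (rng.uniform(-1, 1, shape) for _ in range(4))
    gx1, gx2 = _backward_correct(x1, x2, gy1, gy2)
    def scalar_loss(a, b):
        # Weighted sum of both outputs; its gradient w.r.t. a is gx1 and w.r.t. b is gx2.
        y1, y2 = _forward_correct(a, b)
        return float(numpy.sum(gy1 * y1 + gy2 * y2))
    idx = (0, 0)
    x1p, x1m = x1.copy(), x1.copy()
    x1p[idx] += eps
    x1m[idx] -= eps
    numeric_gx1 = (scalar_loss(x1p, x2) - scalar_loss(x1m, x2)) / (2 * eps)
    assert abs(numeric_gx1 - gx1[idx]) < 1e-4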
# TestFunctionTestSuccessful
#
# This test checks the successful case.
# Incoming array types are also checked.
class FuncCorrectlyImplemented(chainer.FunctionNode):
def __init__(self, device):
self.device = device
def forward(self, inputs):
device = self.device
x1, x2 = inputs
if device.xp is chainerx:
fallback_device = device.fallback_device
assert isinstance(x1, fallback_device.supported_array_types)
assert isinstance(x2, fallback_device.supported_array_types)
self.retain_inputs((0, 1))
y1, y2 = _forward_correct(x1, x2)
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
device = self.device
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
assert isinstance(x1.array, device.supported_array_types)
assert isinstance(x2.array, device.supported_array_types)
assert isinstance(gy1.array, device.supported_array_types)
assert isinstance(gy2.array, device.supported_array_types)
grad_func = FuncGradCorrectlyImplemented(device)
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradCorrectlyImplemented(chainer.FunctionNode):
def __init__(self, device):
self.device = device
def forward(self, inputs_and_grad_outputs):
device = self.device
x1, x2, gy1, gy2 = inputs_and_grad_outputs
if device.xp is chainerx:
fallback_device = device.fallback_device
assert isinstance(gy1, fallback_device.supported_array_types)
assert isinstance(gy2, fallback_device.supported_array_types)
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
device = self.device
ggx1, ggx2 = grad_grad_inputs
assert isinstance(ggx1, chainer.Variable)
assert isinstance(ggx2, chainer.Variable)
assert isinstance(ggx1.array, device.supported_array_types)
assert isinstance(ggx2.array, device.supported_array_types)
x1, x2, gy1, gy2 = self.get_retained_inputs()
assert isinstance(x1, chainer.Variable)
assert isinstance(x2, chainer.Variable)
assert isinstance(gy1, chainer.Variable)
assert isinstance(gy2, chainer.Variable)
assert isinstance(x1.array, device.supported_array_types)
assert isinstance(x2.array, device.supported_array_types)
assert isinstance(gy1.array, device.supported_array_types)
assert isinstance(gy2.array, device.supported_array_types)
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2, gy1, gy2, ggx1, ggx2)
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), (), (2, 0, 3)],
}))
@_inject_backend_tests
class TestFunctionTestSuccessful(testing.FunctionTestCase):
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncCorrectlyImplemented(device)
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectForward
#
# This test checks if it can detect incorrect forward implementation.
class FuncWithIncorrectForward(chainer.FunctionNode):
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
y1, y2 = utils.force_array(y1), utils.force_array(y2)
y2[...] += 1 # ! make incorrect
return y1, y2
def backward(self, *args, **kwargs):
assert False # should never be called
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectForward(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectForward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectBackward
#
# This test checks if it can detect incorrect backward implementation.
class FuncWithIncorrectBackward(chainer.FunctionNode):
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
self.retain_inputs((0, 1))
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
gy1, gy2 = grad_outputs
x1, x2 = self.get_retained_inputs()
ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
ggx2 = ggx2 + 10000 # ! make incorrect
return utils.force_array(ggx1), utils.force_array(ggx2)
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectBackward(testing.FunctionTestCase):
skip_forward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectBackward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectDoubleBackward
#
# This test checks if it can detect incorrect double backward implementation.
class FuncWithIncorrectDoubleBackward(chainer.FunctionNode):
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
self.retain_inputs((0, 1))
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
grad_func = FuncGradWithIncorrectDoubleBackward()
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithIncorrectDoubleBackward(chainer.FunctionNode):
def forward(self, inputs_and_grad_outputs):
x1, x2, gy1, gy2 = inputs_and_grad_outputs
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
ggx1, ggx2 = grad_grad_inputs
x1, x2, gy1, gy2 = self.get_retained_inputs()
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2, gy1, gy2, ggx1, ggx2)
ggy2 = ggy2 + 10000 # ! make incorrect
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectDoubleBackward(testing.FunctionTestCase):
skip_forward_test = True
skip_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectDoubleBackward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# FunctionTestCaseArrayContiguousnessTest
#
# This test checks incoming array contiguousness.
# As it's not possible to assume contiguousness of incoming arrays consistently
# (because gradient_check passes contiguous arrays in numerical_grad),
# we instead simulate the test failure. The function implementation raises an
# error if an incoming array matches the expected contiguousness and we expect
# the failure.
class _ContiguousnessMatched(Exception):
pass
def _is_f_contiguous(shape, strides, itemsize):
if numpy.prod(shape) <= 1:
return True
for sh, st in zip(shape, reversed(strides)):
if sh == 1:
continue
if st != itemsize:
return False
itemsize *= sh
return True
def _get_contiguousness(arr):
if isinstance(arr, chainerx.ndarray):
c_contig = arr.is_contiguous
f_contig = _is_f_contiguous(
arr.shape, arr.strides, arr.itemsize)
return (c_contig, f_contig)
return (arr.flags.c_contiguous, arr.flags.f_contiguous)
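# Hedged illustration (added; not in the upstream file): for plain numpy arrays
# the helper simply mirrors the contiguity flags, e.g.
#   _get_contiguousness(numpy.zeros((2, 3)))            -> (True, False)
#   _get_contiguousness(numpy.zeros((2, 3), order='F')) -> (False, True)
#   _get_contiguousness(numpy.zeros(3))                 -> (True, True)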
def _check_contiguousness(arr, expected_contiguous):
if isinstance(arr, chainer.Variable):
_check_contiguousness(arr.array, expected_contiguous)
return
c_contig, f_contig = _get_contiguousness(arr)
if numpy.prod(arr.shape) <= 1:
return # not applicable for this shape
if expected_contiguous is None:
# expected to be non-contiguous
if not c_contig and not f_contig:
raise _ContiguousnessMatched()
elif expected_contiguous == 'C':
# expected to be C-contiguous
if c_contig:
raise _ContiguousnessMatched()
else:
assert False
class FuncWithContiguousnessCheck(chainer.FunctionNode):
def __init__(self, contiguous, check_on):
self.contiguous = contiguous
self.check_on = check_on
def _check_contiguousness(self, arr):
assert isinstance(arr, chainer.get_array_types())
_check_contiguousness(arr, self.contiguous)
def forward(self, inputs):
x1, x2 = inputs
if self.check_on == 'forward_input':
self._check_contiguousness(x1)
self._check_contiguousness(x2)
self.retain_inputs((0, 1))
y1, y2 = _forward_correct(x1, x2)
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
if self.check_on == 'backward_retained_input':
self._check_contiguousness(x1.array)
self._check_contiguousness(x2.array)
elif self.check_on == 'backward_grad_output':
self._check_contiguousness(gy1.array)
self._check_contiguousness(gy2.array)
grad_func = FuncGradWithContiguousnessCheck(
self.contiguous, self.check_on)
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithContiguousnessCheck(chainer.FunctionNode):
def __init__(self, contiguous, check_on):
self.contiguous = contiguous
self.check_on = check_on
def _check_contiguousness(self, arr):
_check_contiguousness(arr, self.contiguous)
def forward(self, inputs_and_grad_outputs):
x1, x2, gy1, gy2 = inputs_and_grad_outputs
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
ggx1, ggx2 = grad_grad_inputs
if self.check_on == 'double_backward_grad_grad_input':
self._check_contiguousness(ggx1)
self._check_contiguousness(ggx2)
x1, x2, gy1, gy2 = self.get_retained_inputs()
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2, gy1, gy2, ggx1, ggx2)
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1, 2)],
'contiguous': [None, 'C'],
    'check_on': [  # Check points at which contiguousness is probed.
'forward_input',
# TODO(niboshi): As gradient_check.check_backward currently copies the
# grads without preserving strides, they cannot be non-contiguous.
# Enable this check after check_backward will be fixed.
# 'backward_grad_output',
'backward_retained_input',
# TODO(niboshi): Enable this check after check_backward will be fixed.
# 'double_backward_grad_grad_input',
]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=_ContiguousnessMatched)
class FunctionTestCaseArrayContiguousnessTest(testing.FunctionTestCase):
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithContiguousnessCheck(self.contiguous, self.check_on)
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
def before_test(self, test_name):
# Some combinations of test methods and check points are irrelevant.
# Skip such combinations.
# For example, `test_forward` method does not generate grad_outputs.
if test_name == 'test_forward':
if self.check_on != 'forward_input':
raise unittest.SkipTest()
if test_name == 'test_backward':
if self.check_on == 'double_backward_grad_grad_input':
raise unittest.SkipTest()
testing.run_module(__name__, __file__)
| 34.011236
| 79
| 0.663099
|
c3cb38fb1d2b8970733c6cde409825a4e3adede3
| 348
|
py
|
Python
|
src/covid_health/transcoding/names/ecdc.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | 3
|
2020-03-25T22:20:07.000Z
|
2020-03-29T10:01:24.000Z
|
src/covid_health/transcoding/names/ecdc.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | null | null | null |
src/covid_health/transcoding/names/ecdc.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | null | null | null |
col = {
"ecdc": {
"year": "year",
"month": "month",
"day": "day",
"cases": "active_cases",
"deaths": "n_deceased",
"countriesAndTerritories": "geo",
"geoId": "geo_id",
"countryterritoryCode": "country_code",
"popData2018": "population",
}
}
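# Hedged usage note (added; not part of the original module): `col["ecdc"]` is
# shaped like a pandas rename mapping, e.g. a hypothetical
#   df = df.rename(columns=col["ecdc"])
# would turn the raw ECDC column names into the snake_case names on the right.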
var = {
"ecdc": {},
}
| 19.333333
| 47
| 0.462644
|
7bfa62e33990df14beff010a84b4c08ca460ce66
| 462
|
py
|
Python
|
mmdet/datasets/voc_coco.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | 5
|
2022-02-18T16:26:29.000Z
|
2022-03-07T07:25:20.000Z
|
build/lib/mmdet/datasets/voc_coco.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | 1
|
2022-02-24T12:51:19.000Z
|
2022-02-28T06:31:15.000Z
|
mmdet/datasets/voc_coco.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | null | null | null |
import mmcv
import numpy as np
from . import CocoDataset
from .builder import DATASETS
from .custom import CustomDataset
import pickle as pkl
@DATASETS.register_module()
class VOCDataset_coco(CocoDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
| 33
| 78
| 0.634199
|
525529c8146bbe2f6674245e7fff7556b989a785
| 132
|
py
|
Python
|
software-development/unit-tests/python/start.py
|
fahimfarhan/legendary-coding-odyssey
|
55289e05aa04f866201c607bed00c505cd9c4df9
|
[
"MIT"
] | 3
|
2019-07-20T07:26:31.000Z
|
2020-08-06T09:31:09.000Z
|
software-development/unit-tests/python/start.py
|
fahimfarhan/legendary-coding-odyssey
|
55289e05aa04f866201c607bed00c505cd9c4df9
|
[
"MIT"
] | null | null | null |
software-development/unit-tests/python/start.py
|
fahimfarhan/legendary-coding-odyssey
|
55289e05aa04f866201c607bed00c505cd9c4df9
|
[
"MIT"
] | 4
|
2019-06-20T18:43:32.000Z
|
2020-10-07T16:45:23.000Z
|
def add(a,b):
return a+b
def sub(a,b):
return a-b
def multiply(a,b):
return a*b
def div(a,b):
return a/b
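# Hedged sketch (added; not in the original file): the directory name suggests
# these helpers are exercised by unit tests. A minimal pytest-style module for
# them could look like the commented example below (module/function names are
# assumptions).
#   import pytest
#   from start import add, sub, multiply, div
#   def test_add():
#       assert add(2, 3) == 5
#   def test_div_by_zero():
#       with pytest.raises(ZeroDivisionError):
#           div(1, 0)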
| 11
| 18
| 0.522727
|
9477b497d26a2684a9cd0ea6a24ac7ee76a9291b
| 730
|
py
|
Python
|
src/shared/widgets.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | 1
|
2019-03-02T20:06:16.000Z
|
2019-03-02T20:06:16.000Z
|
src/shared/widgets.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | 1
|
2022-01-07T22:57:41.000Z
|
2022-01-07T22:57:41.000Z
|
src/shared/widgets.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | null | null | null |
from django.forms.widgets import SelectMultiple, ClearableFileInput, NumberInput, DateInput
class MySelectMultipleWidget(SelectMultiple):
template_name = 'shared/widgets/select.html'
# option_template_name = 'widgets/select_option.html'
class MyClearableFileInput(ClearableFileInput):
template_name = 'shared/widgets/clearable_file_input.html'
class MyRatingWidget(NumberInput):
"""widget that renders hidden input and star rating. Clicking on rating will set input's value"""
template_name = 'shared/widgets/rating.html'
class MyDateWidget(DateInput):
"""widget that renders hidden input and star rating. Clicking on rating will set input's value"""
template_name = 'shared/widgets/date.html'
| 34.761905
| 101
| 0.780822
|
ff76f7b38ce2318beb749ed50e2d2134dd3f335f
| 15,203
|
py
|
Python
|
analysis/print_lasso.py
|
fyquah95/fyp-worker-dispatcher
|
1ebf764e41202d18ccd1013ec2270caa6c44565a
|
[
"RSA-MD",
"CECILL-B"
] | null | null | null |
analysis/print_lasso.py
|
fyquah95/fyp-worker-dispatcher
|
1ebf764e41202d18ccd1013ec2270caa6c44565a
|
[
"RSA-MD",
"CECILL-B"
] | null | null | null |
analysis/print_lasso.py
|
fyquah95/fyp-worker-dispatcher
|
1ebf764e41202d18ccd1013ec2270caa6c44565a
|
[
"RSA-MD",
"CECILL-B"
] | null | null | null |
import argparse
import logging
import os
import pickle
import sys
import StringIO
import numpy as np
from numpy import linalg
import sexpdata
import matplotlib.pyplot as plt
import constants
import inlining_constants
import inlining_overrides
import inlining_tree
import learn_problem
import learn_linear_general_reward
from learn_lasso_with_alpha import HyperParameters
import sexp_utils
parser = argparse.ArgumentParser(description="formulate the problem")
parser.add_argument("--experiment-dir", type=str, help="experiment dir")
parser.add_argument("--problem-dir", type=str, help="problem dir")
parser.add_argument("--output", type=str, help="output", default="/dev/stdout")
parser.add_argument("--skip-normalisation", action="store_true")
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument(
"--opt-info", action="store_true",
help="dump about how much the model has done something")
group1.add_argument(
"--optimal-decision", action="store_true",
help="dump about how much the model has done something")
group1.add_argument(
"--inspect-run", type=int, default=None,
help="dump about how much the model has done something")
group1.add_argument(
"--inspect-rewards", action="store_true",
help="Inspect values learnt for nodes in the inlining tree.")
group1.add_argument(
"--dump-rewards", action="store_true",
help="Inspect values learnt for nodes in the inlining tree.")
group1.add_argument(
"--plot-rewards", action="store_true",
help="Inspect values learnt for nodes in the inlining tree.")
def choose_left(a, b):
if a is None:
return False
elif b is None:
return True
else:
return a > b
def neg_inf_if_none(a):
if a is None:
return -np.inf
else:
return a
def prettify(n):
if n == np.inf:
return " INF"
elif n == -np.inf:
return " -INF"
elif n > 0:
return " %.3f" % n
else:
return "%.3f" % n
def format_trace_item(trace_item):
if trace_item[0] == "declaration":
return "{" + trace_item[1].closure_origin.id() + "}"
elif trace_item[0] == "function":
return "<" + trace_item[1].path[-1].id() + ">"
else:
assert False
def print_tree(tree, depth=0):
print "(%s | %s)\t%s%s" % (
prettify(neg_inf_if_none(tree.value[0])),
prettify(neg_inf_if_none(tree.value[1])),
str("--" * depth),
str("<ROOT>" if len(tree.name.trace) == 0 else " " + str(format_trace_item(tree.name.trace[-1]))))
for child in tree.children:
print_tree(child, depth=depth + 1)
def build_optimal_tree(
tree,
hyperparams,
normalise_with_num_children,
record_path_long_term_rewards):
"""
args:
tree<name: path, value: (float * float)>
hyperparams
normalise_with_num_children: boolean
returns:
(tree<name: [ Inlined | Apply | Decl ], value: [ Function_call | Closure_origin ]>, float)
"""
assert isinstance(tree, inlining_tree.Node)
optimal_children = []
if len(tree.children) > 0:
acc = []
value_acc = 0.0
for child in tree.children:
acc.append(build_optimal_tree(
child, hyperparams, normalise_with_num_children,
record_path_long_term_rewards=record_path_long_term_rewards))
value_acc += acc[-1][1]
optimal_children.append(acc[-1][0])
children_value = hyperparams.decay_factor * value_acc
if normalise_with_num_children:
children_value = (children_value / float(len(tree.children)))
else:
children_value = 0.0
assert tree.value[0] is not None or tree.value[1] is not None
if not tree.name.is_apply_node():
assert tree.value[0] is not None
assert tree.value[1] is None
lhs_value = neg_inf_if_none(tree.value[0]) + children_value
rhs_value = neg_inf_if_none(tree.value[1])
assert isinstance(tree.name, inlining_tree.Absolute_path)
if tree.value[0] is None:
record_path_long_term_rewards[tree.name] = None
else:
record_path_long_term_rewards[tree.name] = lhs_value
if len(tree.name.trace) == 0:
tree = inlining_tree.Node(
name="Top_level", value=None, children=optimal_children)
return (tree, lhs_value)
if not tree.name.is_apply_node():
func = tree.name.trace[-1][1]
assert isinstance(func, inlining_tree.Function_metadata)
value = func
tree = inlining_tree.Node(
name="Decl", value=value, children=optimal_children)
return (tree, lhs_value)
local_path = tree.name.trace[-1][1]
func = tree.name.trace[-1][2]
assert isinstance(func, inlining_tree.Function_metadata)
assert isinstance(local_path, inlining_tree.Local_path)
value = inlining_tree.Function_call(function=func, path=local_path)
if lhs_value > rhs_value:
tree = inlining_tree.Node(
name="Inlined", value=value, children=optimal_children)
return (tree, lhs_value)
else:
tree = inlining_tree.Node(name="Apply", value=value, children=[])
return (tree, rhs_value)
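# Note (added; not in the original script): build_optimal_tree greedily picks,
# per apply node, between inlining and not inlining. Schematically, with
# decay_factor d:
#   inline_value    = w_inline + d * sum(child values)   (optionally / num children)
#   no_inline_value = w_no_inline
# and the larger value decides whether an "Inlined" node (keeping the children)
# or an "Apply" leaf is emitted.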
def project_benefit_tree(
root, hyperparams, id_to_tree_path, adjacency_list, contributions, mask, indent=0, normalise_with_num_children=False, visited=None):
space = " " * indent
assert not (mask[2 * root] and mask[2 * root + 1])
visited.add(root)
if mask[2 * root]:
logging.info(
"%sLooking into %d(%s)" % (space, root, id_to_tree_path[root]))
base = contributions[2 * root]
acc = 0.0
num_children = len(adjacency_list[root])
for child in adjacency_list[root]:
assert isinstance(child, int)
child_value = project_benefit_tree(
child, hyperparams, id_to_tree_path, adjacency_list,
contributions, mask,
indent=indent+1,
normalise_with_num_children=normalise_with_num_children,
visited=visited)
if child_value is None:
num_children -= 1
else:
acc += child_value
if num_children:
if normalise_with_num_children:
norm_factor = float(num_children)
else:
norm_factor = 1.0
return base + (hyperparams.decay_factor * acc / norm_factor)
else:
return base
elif mask[2 * root + 1]:
logging.info(
"%sTerminating at %d(%s)" % (space, root, id_to_tree_path[root]))
return contributions[2 * root + 1]
else:
logging.info(
"%sFailed to project benefit at %d(%s)" % (space, root, id_to_tree_path[root]))
return None
def run(argv):
logging.getLogger().setLevel(logging.INFO)
args = parser.parse_args(argv)
hyperparams_path = os.path.join(args.experiment_dir, "hyperparams.pkl")
problem = inlining_tree.Problem.load(args.problem_dir)
with open(hyperparams_path, "rb") as f:
hyperparams = pickle.load(f)
normalise_with_num_children = not args.skip_normalisation
assert not normalise_with_num_children
problem_matrices = learn_problem.construct_problem_matrices(
problem, hyperparams,
normalise_with_num_children=normalise_with_num_children)
target_benefit = learn_linear_general_reward.construct_benefit_from_exec_time(
hyperparams.benefit_function, problem)
num_nodes = problem_matrices.participation_mask.shape[1] / 2
participation_count = np.sum(problem_matrices.participation_mask, axis=0)
w = np.load(
os.path.join(args.experiment_dir, "contributions.npy"))
def fill_node_values(node):
node_id = node.name
if participation_count[node_id * 2] > 0:
lhs = w[node_id * 2]
else:
lhs = None
if participation_count[node_id * 2 + 1] > 0:
rhs = w[node_id * 2 + 1]
else:
rhs = None
return (node.name, (lhs, rhs))
def rename_id_to_path(node):
return (id_to_tree_path[node.name], node.value)
if args.opt_info:
A = problem_matrices.benefit_relations
squared_errors = np.power(target_benefit - np.matmul(A, w), 2)
mse = np.mean(squared_errors)
projected_benefits = np.matmul(A, w)
print "Mean squared error:", mse
print "Mimimum projected:", min(projected_benefits)
print "Maximum projected:", max(projected_benefits)
print "Mimimum error:", min(squared_errors)
print "Maximum error:", max(squared_errors)
obtained = np.matmul(A, w)
target = target_benefit
elif args.dump_rewards:
A = problem_matrices.benefit_relations
adjacency_list = inlining_tree.adjacency_list_from_edge_lists(
num_nodes=num_nodes,
edge_lists=problem.edges_lists)
tree_path_to_ids = problem.properties.tree_path_to_ids
id_to_tree_path = {v: k for k, v in tree_path_to_ids.iteritems()}
root = tree_path_to_ids[inlining_tree.Absolute_path([])]
tree = inlining_tree.build_from_adjacency_list(
[None] * num_nodes, root, adjacency_list)
tree = tree.map(f=fill_node_values)
tree = tree.map(f=rename_id_to_path)
record_path_long_term_rewards = {}
(optimal_tree, value) = build_optimal_tree(
tree, hyperparams, normalise_with_num_children,
record_path_long_term_rewards=record_path_long_term_rewards)
arr = []
for i in range(num_nodes):
if participation_count[2 * i] > 0:
long_term = record_path_long_term_rewards[id_to_tree_path[i]]
inline_reward = [[
["immediate", w[2 * i]],
["long_term", long_term]
]]
else:
inline_reward = []
if participation_count[2 * i + 1] > 0:
no_inline_reward = w[2 * i + 1]
else:
no_inline_reward = None
no_inline_reward = inlining_tree.sexp_of_option(
no_inline_reward, f=str)
arr.append([
["path", id_to_tree_path[i].to_sexp()],
["inline_reward", inline_reward],
["no_inline_reward", no_inline_reward],
])
print sexpdata.dumps(arr)
elif args.inspect_rewards:
A = problem_matrices.benefit_relations
adjacency_list = inlining_tree.adjacency_list_from_edge_lists(
num_nodes=num_nodes,
edge_lists=problem.edges_lists)
tree_path_to_ids = problem.properties.tree_path_to_ids
id_to_tree_path = {v: k for k, v in tree_path_to_ids.iteritems()}
root = tree_path_to_ids[inlining_tree.Absolute_path([])]
tree = inlining_tree.build_from_adjacency_list(
[None] * num_nodes, root, adjacency_list)
tree = tree.map(f=fill_node_values)
tree = tree.map(f=rename_id_to_path)
print_tree(tree)
elif args.optimal_decision:
tree_path_to_ids = problem.properties.tree_path_to_ids
id_to_tree_path = {v: k for k, v in tree_path_to_ids.iteritems()}
adjacency_list = inlining_tree.adjacency_list_from_edge_lists(
num_nodes=num_nodes,
edge_lists=problem.edges_lists)
root = tree_path_to_ids[inlining_tree.Absolute_path([])]
tree = inlining_tree.build_from_adjacency_list(
[None] * num_nodes, root, adjacency_list)
tree = tree.map(f=fill_node_values)
tree = tree.map(f=rename_id_to_path)
(optimal_tree, value) = build_optimal_tree(
tree, hyperparams, normalise_with_num_children,
record_path_long_term_rewards={})
sexp_optimal_tree = inlining_tree.sexp_of_top_level(
optimal_tree)
logging.info("Optimal decision has a value of %f" % value)
sexp_buffer = StringIO.StringIO()
sexp_utils.dump_without_quotes(sexp_buffer, sexp_optimal_tree)
with open(args.output, "w") as f:
f.write(sexp_buffer.getvalue())
elif args.inspect_run is not None:
index = args.inspect_run
adjacency_list = inlining_tree.adjacency_list_from_edge_lists(
num_nodes=num_nodes,
edge_lists=problem.edges_lists)
tree_path_to_ids = problem.properties.tree_path_to_ids
id_to_tree_path = {v: k for k, v in tree_path_to_ids.iteritems()}
A = problem_matrices.benefit_relations
target_benefit = target_benefit[index]
projected_benefit = np.matmul(A, w)[index]
participation_mask = problem_matrices.participation_mask[index, :]
assert participation_mask.shape == (num_nodes * 2,)
visited = set()
projected_benefit_with_dfs = project_benefit_tree(
root=tree_path_to_ids[inlining_tree.Absolute_path([])],
hyperparams=hyperparams,
adjacency_list=adjacency_list,
id_to_tree_path=id_to_tree_path,
contributions=w,
mask=participation_mask,
normalise_with_num_children=normalise_with_num_children,
visited=visited)
visited_count = 0
for i in range(num_nodes):
if participation_mask[i * 2] or participation_mask[i * 2 + 1]:
visited_count += 1
if i not in visited:
print "DID NOT VISIT", i, id_to_tree_path[i]
print "--- Information on run %d ---" % index
print "Execution directory =", problem.execution_directories[index]
print "Target benefit =", target_benefit
print "Execution time =", problem.execution_times[index]
print "Projected benefit (with matmul) =", projected_benefit
print "Projected benefit (with DFS) =", projected_benefit_with_dfs
print "Number of visited nodes =", visited_count
print "Number of nodes in problem =", num_nodes
adjacency_list = []
for _ in range(num_nodes):
adjacency_list.append(set())
for edge in problem.edges_lists[index]:
adjacency_list[edge[0]].add((edge[1]))
bfs_edge_list(adjacency_list, id_to_tree_path)
elif args.plot_rewards:
arr = []
for i in range(len(w)):
if participation_count[i] > 0 and abs(w[i]) > 1e-10:
arr.append(w[i])
arr = np.log(abs(np.array(arr)))
print len(arr)
print abs(arr).min()
plt.hist(arr)
plt.show()
else:
assert False
def bfs_edge_list(adjacency_list, id_to_tree_path):
def loop(root):
print "%d\t%s" % (root, id_to_tree_path[root])
for child in adjacency_list[root]:
loop(child)
loop(0)
def main():
run(sys.argv[1:])
if __name__ == "__main__":
main()
| 36.197619
| 140
| 0.628692
|
6460eabcb57062982b2248f5b300272b29d76d74
| 1,404
|
py
|
Python
|
Example.py
|
Chatreux/sudoku-solver
|
5894aafe5a3870d433a16ecba655f2fad19fcadf
|
[
"CC0-1.0"
] | null | null | null |
Example.py
|
Chatreux/sudoku-solver
|
5894aafe5a3870d433a16ecba655f2fad19fcadf
|
[
"CC0-1.0"
] | null | null | null |
Example.py
|
Chatreux/sudoku-solver
|
5894aafe5a3870d433a16ecba655f2fad19fcadf
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: cp1252 -*-
#!/usr/bin/env python
#Simon H. Larsen
#Buttons.py - example
#Project started: 28 August 2012
#Import pygame modules and Buttons.py(it must be in same dir)
import pygame, Buttons
from pygame.locals import *
#Initialize pygame
pygame.init()
class Button_Example:
def __init__(self):
self.main()
#Create a display
def display(self):
self.screen = pygame.display.set_mode((650,370),0,32)
pygame.display.set_caption("Buttons.py - example")
#Update the display and show the button
def update_display(self):
self.screen.fill((30,144,255))
#Parameters: surface, color, x, y, length, height, width, text, text_color
self.Button1.create_button(self.screen, (107,142,35), 225, 135, 200, 100, 0, "Example", (255,255,255))
pygame.display.flip()
#Run the loop
def main(self):
self.Button1 = Buttons.Button()
self.display()
while True:
self.update_display()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == MOUSEBUTTONDOWN:
if self.Button1.pressed(pygame.mouse.get_pos()):
print "Give me a command!"
if __name__ == '__main__':
obj = Button_Example()
| 31.2
| 123
| 0.586182
|
df8f826f69eb9d76887dc9f2969ff93bd6cf0102
| 63,369
|
py
|
Python
|
pytorch_lightning/metrics/sklearns.py
|
Alicegaz/pytorch-lightning
|
96b45f15aa04e0ad7e0ed20b8717ab1599148966
|
[
"Apache-2.0"
] | 2
|
2020-11-28T21:38:35.000Z
|
2021-03-16T12:30:18.000Z
|
pytorch_lightning/metrics/sklearns.py
|
Alicegaz/pytorch-lightning
|
96b45f15aa04e0ad7e0ed20b8717ab1599148966
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/metrics/sklearns.py
|
Alicegaz/pytorch-lightning
|
96b45f15aa04e0ad7e0ed20b8717ab1599148966
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Optional, Union, Sequence, List
import numpy as np
import torch
from pytorch_lightning import _logger as lightning_logger
from pytorch_lightning.metrics.metric import NumpyMetric
from pytorch_lightning.utilities import rank_zero_warn
try:
from torch.distributed import ReduceOp, group
except ImportError:
class ReduceOp:
SUM = None
class group:
WORLD = None
rank_zero_warn('Unsupported `ReduceOp` for distributed computing.')
class SklearnMetric(NumpyMetric):
"""
Bridge between PyTorch Lightning and scikit-learn metrics
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Note:
The order of targets and predictions may be different from the order typically used in PyTorch
"""
def __init__(
self,
metric_name: str,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
**kwargs,
):
"""
Args:
metric_name: the metric name to import and compute from scikit-learn.metrics
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
            **kwargs: additional keyword arguments (will be forwarded to metric call)
"""
super().__init__(name=metric_name,
reduce_group=reduce_group,
reduce_op=reduce_op)
self.metric_kwargs = kwargs
lightning_logger.debug(
f'Metric {self.__class__.__name__} is using Sklearn as backend, meaning that'
' every metric call will cause a GPU synchronization, which may slow down your code'
)
@property
def metric_fn(self):
import sklearn.metrics
return getattr(sklearn.metrics, self.name)
def forward(self, *args, **kwargs) -> Union[np.ndarray, int, float]:
"""
Carries the actual metric computation
Args:
*args: Positional arguments forwarded to metric call (should be already converted to numpy)
**kwargs: keyword arguments forwarded to metric call (should be already converted to numpy)
Return:
the metric value (will be converted to tensor by baseclass)
"""
return self.metric_fn(*args, **kwargs, **self.metric_kwargs)
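# Hedged usage sketch (added; not part of the original module): the generic
# bridge can wrap any sklearn.metrics function by name; the metric and tensors
# below are purely illustrative.
#   metric = SklearnMetric(metric_name='mean_absolute_error')
#   metric(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 2.0]))  # -> tensor([0.2500])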
class Accuracy(SklearnMetric):
"""
Calculates the Accuracy Score
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = Accuracy()
>>> metric(y_pred, y_true)
tensor([0.7500])
"""
def __init__(
self,
normalize: bool = True,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
normalize: If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(metric_name='accuracy_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
normalize=normalize)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Computes the accuracy
Args:
y_pred: the array containing the predictions (already in categorical form)
y_true: the array containing the targets (in categorical form)
sample_weight: Sample weights.
Return:
Accuracy Score
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class AUC(SklearnMetric):
"""
    Calculates the Area Under the Curve using the trapezoidal rule
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = AUC()
>>> metric(y_pred, y_true)
tensor([4.])
"""
def __init__(
self,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__(metric_name='auc',
reduce_group=reduce_group,
reduce_op=reduce_op)
def forward(self, x: np.ndarray, y: np.ndarray) -> float:
"""
Computes the AUC
Args:
x: x coordinates.
y: y coordinates.
Return:
AUC calculated with trapezoidal rule
"""
return super().forward(x=x, y=y)
class AveragePrecision(SklearnMetric):
"""
Calculates the average precision (AP) score.
"""
def __init__(
self,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
average: If None, the scores for each class are returned. Otherwise, this determines the type of
averaging performed on the data:
* If 'micro': Calculate metrics globally by considering each element of the label indicator
matrix as a label.
* If 'macro': Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
* If 'weighted': Calculate metrics for each label, and find their average, weighted by
support (the number of true instances for each label).
* If 'samples': Calculate metrics for each instance, and find their average.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('average_precision_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
average=average)
def forward(
self,
y_score: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
y_score: Target scores, can either be probability estimates of the positive class,
confidence values, or binary decisions.
y_true: True binary labels in binary label indicators.
sample_weight: Sample weights.
Return:
average precision score
"""
return super().forward(y_score=y_score, y_true=y_true,
sample_weight=sample_weight)
class BalancedAccuracy(SklearnMetric):
""" Compute the balanced accuracy score
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([0, 0, 0, 1])
>>> y_true = torch.tensor([0, 0, 1, 1])
>>> metric = BalancedAccuracy()
>>> metric(y_pred, y_true)
tensor([0.7500])
"""
def __init__(
self,
adjusted: bool = False,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
            adjusted: If ``True``, the result is adjusted for chance, such that random performance
corresponds to 0 and perfect performance corresponds to 1
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('balanced_accuracy_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
adjusted=adjusted)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
y_pred: the array containing the predictions (already in categorical form)
y_true: the array containing the targets (in categorical form)
sample_weight: Sample weights.
Return:
balanced accuracy score
"""
return super().forward(y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight)
class CohenKappaScore(SklearnMetric):
"""
    Calculates Cohen's kappa: a statistic that measures inter-annotator agreement
Example:
>>> y_pred = torch.tensor([1, 2, 0, 2])
>>> y_true = torch.tensor([2, 2, 2, 1])
>>> metric = CohenKappaScore()
>>> metric(y_pred, y_true)
tensor([-0.3333])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
weights: Optional[str] = None,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y1`` or ``y2`` are used in sorted order.
            weights: string indicating weighting type used in scoring. None
means no weighting, string ``linear`` means linear weighted
and ``quadratic`` means quadratic weighted
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('cohen_kappa_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels,
weights=weights)
def forward(
self,
y1: np.ndarray,
y2: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
            y1: Labels assigned by the first annotator
            y2: Labels assigned by the second annotator
sample_weight: Sample weights.
Return:
            Cohen's kappa score
"""
return super().forward(y1=y1, y2=y2, sample_weight=sample_weight)
class ConfusionMatrix(SklearnMetric):
"""
Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Example:
>>> y_pred = torch.tensor([0, 1, 2, 1])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = ConfusionMatrix()
>>> metric(y_pred, y_true)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 1., 1.]])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('confusion_matrix',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels)
def forward(self, y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:
"""
Args:
y_pred: Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
Return:
Confusion matrix (array of shape [num_classes, num_classes])
"""
return super().forward(y_pred=y_pred, y_true=y_true)
class DCG(SklearnMetric):
""" Compute discounted cumulative gain
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_score = torch.tensor([[.1, .2, .3, 4, 70]])
>>> y_true = torch.tensor([[10, 0, 0, 1, 5]])
>>> metric = DCG()
>>> metric(y_score, y_true)
tensor([9.4995])
"""
def __init__(
self,
k: Optional[int] = None,
log_base: float = 2,
ignore_ties: bool = False,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
            k: only consider the highest k scores in the ranking
log_base: base of the logarithm used for the discount
ignore_ties: If ``True``, assume there are no ties in y_score for efficiency gains
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('dcg_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
k=k,
log_base=log_base,
ignore_ties=ignore_ties)
def forward(
self,
y_score: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
y_score: target scores, either probability estimates, confidence values
                or non-thresholded measures of decisions
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
DCG score
"""
return super().forward(y_true=y_true,
y_score=y_score,
sample_weight=sample_weight)
class F1(SklearnMetric):
r"""
Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is:
.. math::
F_1 = 2 \cdot \frac{precision \cdot recall}{precision + recall}
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = F1()
>>> metric(y_pred, y_true)
tensor([0.6667])
References
- [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
"""
def __init__(
self,
labels: Optional[Sequence] = None,
pos_label: Union[str, int] = 1,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: Integer array of labels.
pos_label: The class to report if ``average='binary'``.
average: This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
* ``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
* ``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* ``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
* ``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
* ``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('f1_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels,
pos_label=pos_label,
average=average)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class FBeta(SklearnMetric):
"""
Compute the F-beta score. The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = FBeta(beta=0.25)
>>> metric(y_pred, y_true)
tensor([0.7361])
References:
- [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
- [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
"""
def __init__(
self,
beta: float,
labels: Optional[Sequence] = None,
pos_label: Union[str, int] = 1,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
beta: Weight of precision in harmonic mean.
labels: Integer array of labels.
pos_label: The class to report if ``average='binary'``.
average: This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
* ``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
* ``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* ``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
* ``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
* ``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('fbeta_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
FBeta score of the positive class in binary classification or weighted
average of the FBeta scores of each class for the multiclass task.
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class Hamming(SklearnMetric):
"""
Computes the average hamming loss
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([1, 1, 2, 3])
>>> metric = Hamming()
>>> metric(y_pred, y_true)
tensor([0.2500])
"""
def __init__(
self,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('hamming_loss',
reduce_group=reduce_group,
reduce_op=reduce_op)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Average hamming loss
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class Hinge(SklearnMetric):
"""
Computes the average hinge loss
Example:
>>> pred_decision = torch.tensor([-2.17, -0.97, -0.19, -0.43])
>>> y_true = torch.tensor([1, 1, 0, 0])
>>> metric = Hinge()
>>> metric(pred_decision, y_true)
tensor([1.6300])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: Integer array of labels.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('hinge_loss',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels)
def forward(
self,
pred_decision: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
pred_decision : Predicted decisions
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Average hinge loss
"""
return super().forward(pred_decision=pred_decision,
y_true=y_true,
sample_weight=sample_weight)
class Jaccard(SklearnMetric):
"""
Calculates jaccard similarity coefficient score
Example:
>>> y_pred = torch.tensor([1, 1, 1])
>>> y_true = torch.tensor([0, 1, 1])
>>> metric = Jaccard()
>>> metric(y_pred, y_true)
tensor([0.3333])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
pos_label: Union[str, int] = 1,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: Integer array of labels.
pos_label: The class to report if ``average='binary'``.
average: This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
* ``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
* ``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* ``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
* ``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
* ``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('jaccard_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels,
pos_label=pos_label,
average=average)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Jaccard similarity score
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class Precision(SklearnMetric):
"""
Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = Precision()
>>> metric(y_pred, y_true)
tensor([0.7500])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
pos_label: Union[str, int] = 1,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: Integer array of labels.
pos_label: The class to report if ``average='binary'``.
average: This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
* ``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
* ``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* ``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
* ``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
* ``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('precision_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels,
pos_label=pos_label,
average=average)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class Recall(SklearnMetric):
"""
Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = Recall()
>>> metric(y_pred, y_true)
tensor([0.6250])
"""
def __init__(
self,
labels: Optional[Sequence] = None,
pos_label: Union[str, int] = 1,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
labels: Integer array of labels.
pos_label: The class to report if ``average='binary'``.
average: This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
* ``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
* ``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
* ``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
* ``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
* ``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('recall_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
labels=labels,
pos_label=pos_label,
average=average)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_pred : Estimated targets as returned by a classifier.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
"""
return super().forward(y_pred=y_pred, y_true=y_true, sample_weight=sample_weight)
class PrecisionRecallCurve(SklearnMetric):
"""
Compute precision-recall pairs for different probability thresholds
Note:
This implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    y axis.
"""
def __init__(
self,
pos_label: Union[str, int] = 1,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
pos_label: The class to report if ``average='binary'``.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('precision_recall_curve',
reduce_group=reduce_group,
reduce_op=reduce_op,
pos_label=pos_label)
def forward(
self,
probas_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
probas_pred : Estimated probabilities or decision function.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Returns:
precision:
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall:
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds:
Increasing thresholds on the decision function used to compute
precision and recall.
"""
# only return x and y here, since for now we cannot auto-convert elements of multiple length.
# Will be fixed in native implementation
return np.array(super().forward(probas_pred=probas_pred,
y_true=y_true,
sample_weight=sample_weight)[:2])
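# A minimal usage sketch for PrecisionRecallCurve (hypothetical values; outputs not asserted):
#
#     probas_pred = torch.tensor([0.1, 0.4, 0.35, 0.8])
#     y_true = torch.tensor([0, 0, 1, 1])
#     precision, recall = PrecisionRecallCurve()(probas_pred, y_true)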
class ROC(SklearnMetric):
"""
Compute Receiver operating characteristic (ROC)
Note:
this implementation is restricted to the binary classification task.
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([0, 1, 2, 3])
>>> y_true = torch.tensor([0, 1, 2, 2])
>>> metric = ROC()
>>> fps, tps = metric(y_pred, y_true)
>>> fps
tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000])
>>> tps
tensor([0., 0., 0., 1., 1.])
References:
- [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
"""
def __init__(
self,
pos_label: Union[str, int] = 1,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
            pos_label: The class to report if ``average='binary'``.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('roc_curve',
reduce_group=reduce_group,
reduce_op=reduce_op,
pos_label=pos_label)
def forward(
self,
y_score: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> Union[np.ndarray, float]:
"""
Args:
y_score : Target scores, can either be probability estimates of the positive
class or confidence values.
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Returns:
fpr:
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr:
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds:
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
"""
return np.array(super().forward(y_score=y_score, y_true=y_true, sample_weight=sample_weight)[:2])
class AUROC(SklearnMetric):
"""
Compute Area Under the Curve (AUC) from prediction scores
Note:
this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
"""
def __init__(
self,
average: Optional[str] = 'macro',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
average: If None, the scores for each class are returned. Otherwise, this determines the type of
averaging performed on the data:
* If 'micro': Calculate metrics globally by considering each element of the label indicator
matrix as a label.
* If 'macro': Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
* If 'weighted': Calculate metrics for each label, and find their average, weighted by
support (the number of true instances for each label).
* If 'samples': Calculate metrics for each instance, and find their average.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('roc_auc_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
average=average)
def forward(
self,
y_score: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""
Args:
y_score: Target scores, can either be probability estimates of the positive class,
confidence values, or binary decisions.
y_true: True binary labels in binary label indicators.
sample_weight: Sample weights.
Return:
Area Under Receiver Operating Characteristic Curve
"""
return super().forward(y_score=y_score, y_true=y_true,
sample_weight=sample_weight)
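# A minimal usage sketch for AUROC (hypothetical binary example; output not asserted):
#
#     y_score = torch.tensor([0.1, 0.4, 0.35, 0.8])
#     y_true = torch.tensor([0, 0, 1, 1])
#     auc = AUROC()(y_score, y_true)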
class ExplainedVariance(SklearnMetric):
"""
Calculates explained variance score
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 0.0, 2, 8])
>>> y_true = torch.tensor([3, -0.5, 2, 7])
>>> metric = ExplainedVariance()
>>> metric(y_pred, y_true)
tensor([0.9572])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'variance_weighted',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’, 'variance_weighted']
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('explained_variance_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Explained variance score
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
class MeanAbsoluteError(SklearnMetric):
"""
Compute absolute error regression loss
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 0.0, 2, 8])
>>> y_true = torch.tensor([3, -0.5, 2, 7])
>>> metric = MeanAbsoluteError()
>>> metric(y_pred, y_true)
tensor([0.5000])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'uniform_average',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’]
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_absolute_error',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
def forward(self, y_pred: np.ndarray, y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Mean absolute error
"""
return super().forward(y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight)
class MeanSquaredError(SklearnMetric):
"""
Compute mean squared error loss
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 0.0, 2, 8])
>>> y_true = torch.tensor([3, -0.5, 2, 7])
>>> metric = MeanSquaredError()
>>> metric(y_pred, y_true)
tensor([0.3750])
>>> metric = MeanSquaredError(squared=True)
>>> metric(y_pred, y_true)
tensor([0.6124])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'uniform_average',
squared: bool = False,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’]
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
            squared: if ``True``, returns the rmse (square root of the mse) value, else the mse value
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_squared_error',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
self.squared = squared
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Mean squared error
"""
mse = super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
if self.squared:
mse = np.sqrt(mse)
return mse
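# Usage note for MeanSquaredError, consistent with the doctest above:
#
#     MeanSquaredError()(y_pred, y_true)               # mean squared error
#     MeanSquaredError(squared=True)(y_pred, y_true)   # root mean squared error (sqrt of the MSE)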
class MeanSquaredLogError(SklearnMetric):
"""
Calculates the mean squared log error
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 5, 4, 8])
>>> y_true = torch.tensor([3, 5, 2.5, 7])
>>> metric = MeanSquaredLogError()
>>> metric(y_pred, y_true)
tensor([0.0397])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'uniform_average',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’]
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_squared_log_error',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Mean squared log error
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
class MedianAbsoluteError(SklearnMetric):
"""
Calculates the median absolute error
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 0.0, 2, 8])
>>> y_true = torch.tensor([3, -0.5, 2, 7])
>>> metric = MedianAbsoluteError()
>>> metric(y_pred, y_true)
tensor([0.5000])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'uniform_average',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’]
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('median_absolute_error',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
def forward(self, y_pred: np.ndarray, y_true: np.ndarray):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
Return:
Median absolute error
"""
return super().forward(y_true=y_true, y_pred=y_pred)
class R2Score(SklearnMetric):
"""
Calculates the R^2 score also known as coefficient of determination
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2.5, 0.0, 2, 8])
>>> y_true = torch.tensor([3, -0.5, 2, 7])
>>> metric = R2Score()
>>> metric(y_pred, y_true)
tensor([0.9486])
"""
def __init__(
self,
multioutput: Optional[Union[str, List[float]]] = 'uniform_average',
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
multioutput: either one of the strings [‘raw_values’, ‘uniform_average’, 'variance_weighted']
or an array with shape (n_outputs,) that defines how multiple
output values should be aggregated.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('r2_score',
reduce_group=reduce_group,
reduce_op=reduce_op,
multioutput=multioutput)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
R^2 score
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
class MeanPoissonDeviance(SklearnMetric):
"""
Calculates the mean poisson deviance regression loss
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2, 0.5, 1, 4])
>>> y_true = torch.tensor([0.5, 0.5, 2., 2.])
>>> metric = MeanPoissonDeviance()
>>> metric(y_pred, y_true)
tensor([0.9034])
"""
def __init__(
self,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_poisson_deviance',
reduce_group=reduce_group,
reduce_op=reduce_op)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
            Mean poisson deviance
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
class MeanGammaDeviance(SklearnMetric):
"""
Calculates the mean gamma deviance regression loss
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([0.5, 0.5, 2., 2.])
>>> y_true = torch.tensor([2, 0.5, 1, 4])
>>> metric = MeanGammaDeviance()
>>> metric(y_pred, y_true)
tensor([1.0569])
"""
def __init__(
self,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_gamma_deviance',
reduce_group=reduce_group,
reduce_op=reduce_op)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Mean gamma deviance
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
class MeanTweedieDeviance(SklearnMetric):
"""
Calculates the mean tweedie deviance regression loss
Warning:
Every metric call will cause a GPU synchronization, which may slow down your code
Example:
>>> y_pred = torch.tensor([2, 0.5, 1, 4])
>>> y_true = torch.tensor([0.5, 0.5, 2., 2.])
>>> metric = MeanTweedieDeviance()
>>> metric(y_pred, y_true)
tensor([1.8125])
"""
def __init__(
self,
power: float = 0,
reduce_group: Any = group.WORLD,
reduce_op: Any = ReduceOp.SUM,
):
"""
Args:
power: tweedie power parameter:
* power < 0: Extreme stable distribution. Requires: y_pred > 0.
* power = 0 : Normal distribution, output corresponds to mean_squared_error.
y_true and y_pred can be any real numbers.
* power = 1 : Poisson distribution. Requires: y_true >= 0 and y_pred > 0.
* 1 < power < 2 : Compound Poisson distribution. Requires: y_true >= 0 and y_pred > 0.
* power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
* power = 3 : Inverse Gaussian distribution. Requires: y_true > 0 and y_pred > 0.
* otherwise : Positive stable distribution. Requires: y_true > 0 and y_pred > 0.
reduce_group: the process group for DDP reduces (only needed for DDP training).
Defaults to all processes (world)
reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
Defaults to sum.
"""
super().__init__('mean_tweedie_deviance',
reduce_group=reduce_group,
reduce_op=reduce_op,
power=power)
def forward(
self,
y_pred: np.ndarray,
y_true: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
):
"""
Args:
y_pred: Estimated target values
y_true: Ground truth (correct) target values.
sample_weight: Sample weights.
Return:
Mean tweedie deviance
"""
return super().forward(y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight)
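# Illustrative note (hypothetical tensors; outputs not asserted): per the power parameter above,
#     MeanTweedieDeviance(power=0)(y_pred, y_true)  # corresponds to mean squared error
#     MeanTweedieDeviance(power=1)(y_pred, y_true)  # corresponds to mean Poisson deviance
#     MeanTweedieDeviance(power=2)(y_pred, y_true)  # corresponds to mean gamma deviance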
| 36.335436
| 108
| 0.561473
|
29e4683de7cd1f6c6b8b7bdfb2e7f9937ead72d9
| 4,439
|
py
|
Python
|
powersimdata/scenario/scenario.py
|
lanesmith/PowerSimData
|
668fda79a316e3681d14c35099df75a7b21f87e4
|
[
"MIT"
] | null | null | null |
powersimdata/scenario/scenario.py
|
lanesmith/PowerSimData
|
668fda79a316e3681d14c35099df75a7b21f87e4
|
[
"MIT"
] | null | null | null |
powersimdata/scenario/scenario.py
|
lanesmith/PowerSimData
|
668fda79a316e3681d14c35099df75a7b21f87e4
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import pandas as pd
from powersimdata.data_access.context import Context
from powersimdata.data_access.execute_list import ExecuteListManager
from powersimdata.data_access.scenario_list import ScenarioListManager
from powersimdata.scenario.analyze import Analyze
from powersimdata.scenario.create import Create, _Builder
from powersimdata.scenario.execute import Execute
pd.set_option("display.max_colwidth", None)
class Scenario:
"""Handles scenario.
    :param int/str descriptor: scenario name or index. If None, defaults to a Scenario
in Create state.
:raises TypeError: if descriptor is not int or str
"""
_setattr_allowlist = {
"data_access",
"state",
"status",
"info",
"_scenario_list_manager",
"_execute_list_manager",
}
_default_info = [
("plan", ""),
("name", ""),
("state", "create"),
("grid_model", ""),
("interconnect", ""),
("base_demand", ""),
("base_hydro", ""),
("base_solar", ""),
("base_wind", ""),
("change_table", ""),
("start_date", ""),
("end_date", ""),
("interval", ""),
("engine", ""),
]
def __init__(self, descriptor=None):
"""Constructor."""
if isinstance(descriptor, int):
descriptor = str(descriptor)
if descriptor is not None and not isinstance(descriptor, str):
raise TypeError("Descriptor must be a string or int (for a Scenario ID)")
self.data_access = Context.get_data_access()
self._scenario_list_manager = ScenarioListManager(self.data_access)
self._execute_list_manager = ExecuteListManager(self.data_access)
if not descriptor:
self.info = OrderedDict(self._default_info)
self.status = None
self.state = Create(self)
else:
self._set_info(descriptor)
try:
state = self.info["state"]
self._set_status()
if state == "execute":
self.state = Execute(self)
elif state == "analyze":
self.state = Analyze(self)
except AttributeError:
pass
def __getattr__(self, name):
if name in self.state.exported_methods:
return getattr(self.state, name)
elif hasattr(self.state, "__getattr__"):
return self.state.__getattr__(name)
else:
raise AttributeError(
f"Scenario object in {self.state.name} state "
f"has no attribute {name}"
)
def __setattr__(self, name, value):
if name in self._setattr_allowlist:
super().__setattr__(name, value)
elif isinstance(self.state, Create) and name in _Builder.exported_methods:
raise AttributeError(
f"{name} is exported from Scenario.state.builder, "
"edit it there if necessary"
)
elif name in self.state.exported_methods:
raise AttributeError(
f"{name} is exported from Scenario.state, edit it there if necessary"
)
super().__setattr__(name, value)
def __dir__(self):
return sorted(super().__dir__() + list(self.state.exported_methods))
def _set_info(self, descriptor):
"""Sets scenario information.
:param str descriptor: scenario descriptor.
"""
info = self._scenario_list_manager.get_scenario(descriptor)
if info is None:
raise ValueError(
f"{descriptor} not found in Scenario List. "
"See available scenarios with Scenario().get_scenario_table()"
)
self.info = info
def _set_status(self):
"""Sets execution status of scenario."""
scenario_id = self.info["id"]
self.status = self._execute_list_manager.get_status(scenario_id)
def get_scenario_table(self):
"""Get scenario table
:return: (*pandas.DataFrame*) -- scenario table
"""
return self._scenario_list_manager.get_scenario_table()
def change(self, state):
"""Changes state.
:param class state: One of :class:`.Analyze` :class:`.Create`,
:class:`.Execute` or :class:`.Delete`.
"""
self.state.switch(state)
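# A minimal usage sketch ("403" is a hypothetical scenario id):
#
#     scenario = Scenario()       # new scenario, starts in the Create state
#     scenario = Scenario("403")  # existing scenario, state taken from the scenario list
#     scenario.get_scenario_table()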
| 32.881481
| 85
| 0.591575
|
0e5738c5707f561f3c669edd947db1d8cc82a6be
| 457
|
py
|
Python
|
lists/migrations/0003_list.py
|
joelstanner/TDD_python_part2
|
c4a987033e2fcf38256d7c10ee63d8902a5495e4
|
[
"MIT"
] | null | null | null |
lists/migrations/0003_list.py
|
joelstanner/TDD_python_part2
|
c4a987033e2fcf38256d7c10ee63d8902a5495e4
|
[
"MIT"
] | null | null | null |
lists/migrations/0003_list.py
|
joelstanner/TDD_python_part2
|
c4a987033e2fcf38256d7c10ee63d8902a5495e4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
],
),
]
| 21.761905
| 114
| 0.579869
|
ead4b552b1eca0a821ae78236a8cd8e16f7edef7
| 38,706
|
py
|
Python
|
testnet/ansible/inventory/inventory.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | 1
|
2021-11-18T02:32:05.000Z
|
2021-11-18T02:32:05.000Z
|
testnet/ansible/inventory/inventory.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | null | null | null |
testnet/ansible/inventory/inventory.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# This script will generate the deployment inventory dynamically, based on the contents of:
# 1. /testnet/env/shared-config.yml
# 2. /testnet/env/<deployment>/hosts.ini
#
# To change the deployment config (inventory), you likely want to edit only
# /testnet/env/<deployment>/hosts.ini
#
# The contents of ../shared-config.yml are common (shared) for all deployments
# and part of that config may be overridden in /testnet/env/<deployment>/hosts.ini
#
# The 'hosts.ini' part of the deployment inventory can be overridden by setting an environment
# variable HOSTS_INI_FILENAME. E.g. `export HOSTS_INI_FILENAME=hosts_large_subnet.ini`
#
# The 'nodes' in the deployment inventory can be filtered (whitelisted) by setting an environment
# variable NODES_FILTER_INCLUDE. E.g. `export NODES_FILTER_INCLUDE='nns=root'`
# or also as a regular expression: `export NODES_FILTER_INCLUDE='nns=(parent|child)'`
#
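# Example invocation (illustrative; "mytestnet" is a hypothetical deployment name):
#   DEPLOYMENT=mytestnet HOSTS_INI_FILENAME=hosts_large_subnet.ini \
#       NODES_FILTER_INCLUDE='nns=root' ./inventory.py --list
#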
import argparse
import io
import ipaddress
import json
import logging
import os
import pathlib
import re
import socket
import sys
import textwrap
from random import Random
import yaml
from ansible.inventory.manager import InventoryManager
from ansible.parsing.dataloader import DataLoader
# The assumption is that the script is located at <something>/testnet/ansible/inventory/inventory.py
# Then BASE_DIR becomes <something>/testnet
BASE_DIR = pathlib.PosixPath(__file__).absolute().parent.parent.parent
SERIAL_NUMBER_VARNAME = "system_serial_number"
class IcDeploymentInventory:
"""Dynamic inventory for the deployment."""
def __init__(self, deployment_name):
"""Initialize the class object."""
self._inventory = {}
self.deployment_name = deployment_name
self.common_config = None
self.data_centers = None
self.phy_serial_numbers_filename = None
self.phy_serial_numbers = {}
self.boundary_datacenters = None
self._all_nodes_hosts = []
self._nodes_filter_include = {}
self._all_nns_hosts = set()
self._all_boundary_hosts = set()
self._all_aux_hosts = set()
self._all_physical_hosts = {}
self._phy_short_mapping = {}
self._parent = {} # link up from a child group/host to the parent group
self.ic_hosts = {}
self._load_baseline_config()
self._load_hosts()
def _load_baseline_config(self):
"""Load the config shared by all deployments."""
cfg = BASE_DIR / "env/shared-config.yml"
self.common_config = yaml.load(open(cfg), Loader=yaml.FullLoader)
self.data_centers = self.common_config.get("data_centers")
if not self.data_centers:
logging.error("No data centers defined in %s", cfg)
self._inventory = {"_meta": {"hostvars": {}}}
for key, val in self.common_config.items():
if not isinstance(val, dict):
continue
inv = {}
if "hosts" in val:
inv["hosts"] = val["hosts"]
if "vars" in val:
inv["vars"] = val["vars"]
if "children" in val:
inv["children"] = val["children"]
if inv:
self._inventory[key] = inv
# Load a complete list of physical hosts and a unique string per host to generate a unique IPv6 address
self.phy_serial_numbers_filename = BASE_DIR / "env/host-unique-string.yml"
self.phy_serial_numbers = yaml.load(open(self.phy_serial_numbers_filename), Loader=yaml.FullLoader)
def _load_hosts(self):
# inventory hosts file can be comma separated
hosts_ini_filename = os.environ.get("HOSTS_INI_FILENAME", "hosts.ini")
inventory_filename = str(BASE_DIR / f"env/{self.deployment_name}/{hosts_ini_filename}")
inventory_dir = os.path.dirname(inventory_filename)
# Include only the nodes for which certain variables are set, e.g. `nns=parent`
filter_include = os.environ.get("NODES_FILTER_INCLUDE", "")
if filter_include:
if "=" in filter_include:
key, value = filter_include.split("=", 1)
self._nodes_filter_include[key] = value.split(",") # ',' is the AND operator, '|' is the OR operator
else:
logging.warning(
"Ignoring invalid filter_include (does not contain '='): %s",
filter_include,
)
# Loader takes care of finding and reading yaml, json and ini files
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=inventory_filename)
# filter the inventory nodes based on the NODES_FILTER_INCLUDE environment variable
inventory = self._filter_hosts(inventory)
if "physical_hosts" not in inventory.groups:
group = inventory.add_group("physical_hosts")
for host in self.phy_serial_numbers.keys():
inventory.add_host(host, group="physical_hosts")
for group in inventory.groups.values():
# get hosts from the common config and override if needed
group_name = str(group.name)
cur_inv_group = self._inventory.get(group_name, {})
# Merge the "hosts" with the shared-config.yml contents
hosts = set(cur_inv_group.get("hosts", []))
hosts.update([h.name for h in group.hosts])
if hosts:
cur_inv_group["hosts"] = sorted(hosts)
for host in cur_inv_group["hosts"]:
self._parent[host] = group_name
# Merge "vars" with the shared-config.yml contents
group_vars = cur_inv_group.get("vars", {})
group_vars.update(group.vars)
if group_vars:
cur_inv_group["vars"] = group_vars
# Merge the "children" with the shared-config.yml contents
children = set(cur_inv_group.get("children", []))
children.update([c.name for c in group.child_groups])
if group_name == "all":
children.add("physical_hosts")
if children:
cur_inv_group["children"] = sorted(children)
for child in cur_inv_group["children"]:
self._parent[child] = group_name
if cur_inv_group:
self._inventory[group.name] = cur_inv_group
# Populate the potentially missing variables for hosts
for host in inventory.get_hosts():
self._inventory["_meta"]["hostvars"][host.name] = host.vars
for host in self.phy_serial_numbers.keys():
self._inventory["_meta"]["hostvars"][host] = {
"inventory_file": inventory_filename,
"inventory_dir": inventory_dir,
SERIAL_NUMBER_VARNAME: self.phy_serial_numbers[host],
}
# Update a list of all "nodes" in the deployment
self._update_all_nodes_hosts(inventory)
self._update_nns_nodes(inventory)
self._update_boundary_nodes(inventory)
self._update_aux_nodes(inventory)
self._update_all_physical_nodes_hosts(inventory)
# Check and if necessary fix/set missing node_index
self._inventory_patch_node_index(inventory)
self._inventory_patch_external_nodes(inventory)
# Populate the potentially missing variables for hosts
for host in inventory.get_hosts():
if host.name in self._all_nodes_hosts:
host = self._host_patch_ipv6(host)
# Check and if necessary fix/set missing subnet_index
self._inventory_patch_subnet_index(inventory)
def _host_patch_ipv6(self, host):
"""Set the node IPv6 address, MAC address, guest hostname, and related."""
ansible_host = host.vars.get("ansible_host")
if "ipv6" not in host.vars:
ipv6 = self._ipv6_resolve(host.name)
ansible_host = host.vars.get("ansible_host")
if not ipv6 and ansible_host:
                # ipv6 is not defined but ansible_host is.
# Let's try to build "ipv6" from ansible_host
ipv6 = self._ipv6_resolve(ansible_host)
if not ipv6:
# That didn't work, try to build IPv6 from the MAC address
ic_host = host.vars.get("ic_host")
if ic_host:
ipv6_prefix = self._get_ipv6_prefix_for_ic_host(ic_host)
ipv6_subnet = self._get_ipv6_subnet_for_ic_host(ic_host)
# For the mainnet deployments, the MAC address is calculated based on the number of guests on
# the physical host, so we need to enumerate and count the guests on each physical host.
phy_fqdn = self._phy_short_mapping[ic_host]
phy_vars = self._inventory["_meta"]["hostvars"][phy_fqdn]
# Assign a unique ID to each physical host. This will be a serial number if
# available, or fallback to the hostname.
phy_system_serial_number = phy_vars.get(SERIAL_NUMBER_VARNAME)
if not phy_system_serial_number:
logging.error(
"Physical host does not have a valid serial number: %s",
phy_fqdn,
)
env_hosts_path = self.phy_serial_numbers_filename.parent / self.deployment_name / "hosts.ini"
logging.error(
"ansible -i %s physical_hosts -m shell --become -a 'dmidecode -s system-serial-number'",
env_hosts_path,
)
logging.error(
"And update the serial numbers in %s",
self.phy_serial_numbers_filename.absolute(),
)
sys.exit(1)
# Each guest on a host (per deployment) gets a unique number 1..N
# used to generate a unique MAC address.
guest_number = phy_vars["ic_guests"].index(host.name) + 1
host.vars["guest_number"] = guest_number
guest_hostname = f"{host.name.rsplit('.', 1)[0]}-{guest_number}"
host.vars["guest_hostname"] = re.sub(r"\W+", "-", guest_hostname)
mac_address = mac_address_mainnet(phy_system_serial_number, self.deployment_name, guest_number)
host.vars["mac_address"] = mac_address
ipv6 = ipv6_address_calculate_slaac(ipv6_prefix, ipv6_subnet, mac_address)
if ipv6:
# Normalize the IPv6 address before using it elsewhere
ipv6 = ipaddress.ip_address(ipv6)
host.vars["ipv6"] = str(ipv6)
if not ansible_host:
host.vars["ansible_host"] = str(ipv6)
return host
def _inventory_patch_external_nodes(self, inventory):
"""Set an 'external' tag for nodes not operated by DFINITY."""
for hostname in self._all_nodes_hosts:
host_vars = self._inventory["_meta"]["hostvars"][hostname]
node_type = host_vars.get("node_type")
if node_type:
node_type_tags = set(node_type.split(","))
ic_host = host_vars.get("ic_host")
phy_fqdn = self._phy_short_mapping[ic_host]
phy_vars = self._inventory["_meta"]["hostvars"][phy_fqdn]
if "external" in node_type_tags:
phy_vars["external"] = True
else:
phy_vars["external"] = False
def _inventory_patch_node_index(self, inventory):
"""Set node_index for all hosts if any are missing."""
# Check if any node_index appears twice, possibly due to a copy&paste bug
found_node_idx = set()
for hostname in self._all_nodes_hosts:
# Check if any node doesn't have node_index set
host_vars = self._inventory["_meta"]["hostvars"][hostname]
if "node_index" not in host_vars:
# check if the node name ends with ".<number>" ==> take the number as the node_index
m = re.match(r".+\.(\d+)$", hostname)
if m:
# Example: if a host is named "small-a.anything.100", set the node_index to "100"
host_vars["node_index"] = int(m.group(1))
else:
raise ValueError("Missing node_index for host %s" % hostname)
node_index = host_vars.get("node_index")
if node_index:
if node_index in found_node_idx:
logging.error("Duplicate node_index '%s' for host %s", node_index, host_vars)
raise ValueError("Duplicate node_index")
else:
found_node_idx.add(node_index)
def _inventory_patch_subnet_index(self, inventory):
"""Set subnet_index for all hosts if any are missing."""
# For all subnet_X groups, copy all variables to the child hosts
for group in inventory.groups.values():
group_name = str(group.name).strip()
m = re.match(r"subnet_(\d+)", group_name)
if m:
subnet_index = int(m.group(1))
for host in group.hosts:
host_vars = self._inventory["_meta"]["hostvars"][str(host)]
if "subnet_index" not in host_vars:
host_vars["subnet_index"] = subnet_index
# For groups "subnet_unassigned" or any starting with "onboarding_" ==> no need to set the subnet index
elif group_name == "subnet_unassigned" or group_name.startswith("onboarding_"):
for host in group.hosts:
host_vars = self._inventory["_meta"]["hostvars"][str(host)]
if "subnet_index" not in host_vars:
host_vars["subnet_index"] = "x"
elif group_name == "nns":
# For NNS group, set the subnet_index to 0 (hard-coded)
for host in group.hosts:
host_vars = self._inventory["_meta"]["hostvars"][str(host)]
if "subnet_index" not in host_vars:
host_vars["subnet_index"] = 0
elif group_name == "boundary":
for host in group.hosts:
host_vars = self._inventory["_meta"]["hostvars"][str(host)]
host_vars["subnet_index"] = "boundary"
elif group_name == "aux":
for host in group.hosts:
host_vars = self._inventory["_meta"]["hostvars"][str(host)]
host_vars["subnet_index"] = "aux"
for hostname in self._all_nodes_hosts:
host_vars = self._inventory["_meta"]["hostvars"][hostname]
# check if a host is named "<deployment_name>.<subnet_index>.<node_index>"
m = re.match(r".+\.(\d+)\.\d+$", hostname)
if m:
# if a host is named e.g. "small-a.1.2", set the subnet_index to "1"
subnet_index = int(m.group(1))
if "subnet_index" in host_vars:
if subnet_index != host_vars["subnet_index"]:
raise ValueError("Mismatch subnet_index for host %s and its group name" % hostname)
else:
host_vars["subnet_index"] = int(m.group(1))
else:
if "subnet_index" not in host_vars:
raise ValueError("Missing subnet_index for host %s" % hostname)
def _filter_hosts(self, inventory):
if not self._nodes_filter_include:
return inventory
root_group = inventory.groups.get("nodes")
sub_groups_to_visit = set(root_group.child_groups)
while sub_groups_to_visit:
child = str(sub_groups_to_visit.pop())
sub_group = inventory.groups.get(child)
# For the other groups apply the filter
subgroup_hosts = list(sub_group.hosts or [])
for host in subgroup_hosts:
for key, pattern_list in self._nodes_filter_include.items():
# Prepare a list of the required tags for this host
missing_tags = set(pattern_list)
for pattern in pattern_list:
# Ensure that a complete tag is matched, not only the beginning.
pattern_word = pattern
if not pattern_word.endswith("$"):
pattern_word += "$"
for host_tag in host.vars.get(key, "").split(","):
if re.match(pattern_word, host_tag):
missing_tags.remove(pattern)
break
else:
break
if missing_tags:
sub_group.remove_host(host)
logging.debug(
"Host removed %s since %s=%s does not match the required filter '%s'",
host.name,
key,
host.vars.get(key),
pattern_list,
)
else:
logging.debug(
"Host %s (%s) satisfies matches the required filter '%s'",
host.name,
host.vars.get(key),
pattern_list,
)
sub_groups_to_visit.update(sub_group.child_groups or [])
return inventory
def _get_all_group_hosts(self, inventory, root_group_name):
root_group = inventory.groups.get(root_group_name)
if not root_group:
if root_group_name == "boundary":
logging.debug("Optional '%s' group not found", root_group_name)
return []
elif root_group_name == "physical_hosts":
logging.debug("Group '%s' not found", root_group_name)
return sorted(self.phy_serial_numbers.keys())
else:
logging.error("Required '%s' group not found", root_group_name)
return []
children = set(root_group.child_groups)
nodes = set(root_group.hosts)
while children:
child = str(children.pop())
sub_group = inventory.groups.get(child)
nodes.update(sub_group.hosts or [])
children.update(sub_group.child_groups or [])
return sorted([str(_) for _ in nodes])
def _update_all_nodes_hosts(self, inventory):
"""Return a sorted list of all hosts under the "nodes" group."""
self._all_nodes_hosts = self._get_all_group_hosts(inventory, "nodes")
def _update_nns_nodes(self, inventory):
"""Return a sorted list of all hosts under the "nns" group."""
self._all_nns_hosts = set(self._get_all_group_hosts(inventory, "nns"))
def _update_boundary_nodes(self, inventory):
"""Return a sorted list of all hosts under the boundary group."""
self._all_boundary_hosts = set(self._get_all_group_hosts(inventory, "boundary"))
def _update_aux_nodes(self, inventory):
"""Return a sorted list of all hosts under the aux group."""
self._all_aux_hosts = set(self._get_all_group_hosts(inventory, "aux"))
def _update_all_physical_nodes_hosts(self, inventory):
# make a complete list of physical hosts and the nodes assigned to them
for phy_fqdn in self._get_all_group_hosts(inventory, "physical_hosts"):
self._all_physical_hosts[phy_fqdn] = []
# make a complete list of all physical nodes, with their short hostname
self._phy_short_mapping = {}
for phy in self._all_physical_hosts.keys():
phy_short = phy.split(".")[0]
self._phy_short_mapping[phy_short] = phy
# For every physical host make a list of all nodes (VM guests) running on it
for node in self._all_nodes_hosts:
node_vars = self._inventory["_meta"]["hostvars"][node]
phy_short = node_vars.get("ic_host")
phy_fqdn = self._phy_short_mapping[phy_short]
if phy_short not in self._phy_short_mapping:
logging.error(
"Host %s not found in the list of physical hosts, check the contents of %s"
% (phy_short, self.phy_serial_numbers_filename.absolute())
)
sys.exit(1)
self._all_physical_hosts[phy_fqdn].append(node)
phy_hosts = set(self._inventory["physical_hosts"]["hosts"])
for phy_fqdn, nodes in self._all_physical_hosts.items():
phy_serial_number = self.phy_serial_numbers.get(phy_fqdn)
phy_vars = self._inventory["_meta"]["hostvars"][phy_fqdn]
if phy_serial_number:
phy_vars[SERIAL_NUMBER_VARNAME] = phy_serial_number
if nodes:
phy_vars["ic_guests"] = nodes
elif os.environ.get("INCLUDE_ALL_PHYSICAL_HOSTS"):
logging.debug("Physical host %s does not have any guests", phy_fqdn)
else:
# there are no nodes (VM guests) running on this physical host
phy_hosts.remove(phy_fqdn)
del self._inventory["_meta"]["hostvars"][phy_fqdn]
self._inventory["physical_hosts"]["hosts"] = sorted(phy_hosts)
self._inventory["all"]["vars"]["ic_deployment_name"] = self.deployment_name
def _ipv6_resolve(self, hostname):
if not (hostname.endswith(".dfinity.network") or hostname.endswith(".dfinity.systems")):
return
try:
return socket.getaddrinfo(hostname, None, socket.AF_INET6)[0][4][0]
except (OSError, KeyError):
pass
def _get_ipv6_prefix_for_ic_host(self, ic_host):
dc = self._get_dc_config_for_ic_host(ic_host)
return dc.get("vars", {}).get("ipv6_prefix")
def _get_ipv6_subnet_for_ic_host(self, ic_host):
dc = self._get_dc_config_for_ic_host(ic_host)
return dc.get("vars", {}).get("ipv6_subnet")
def _get_dc_config_for_ic_host(self, ic_host):
hostname_short = ic_host.split(".")[0]
dc = hostname_short.split("-")[0]
return self.data_centers.get(dc, {})
@property
def inventory(self):
"""Return the complete (read-only) inventory."""
# https://docs.ansible.com/ansible/latest/dev_guide/developing_inventory.html#inventory-script-conventions
return self._inventory
def hostvars(self, hostname=None):
"""Print either an empty JSON hash/dictionary, or a hash/dictionary of variables."""
if hostname:
inventory_vars = self._inventory["_meta"]["hostvars"]
if hostname in inventory_vars:
return inventory_vars[hostname]
else:
return self._inventory["all"].get("vars", {})
return {}
def _search_in_host_vars_and_in_parents(self, hostname, var_name):
"""Search for a variable defined either in the host vars, or parent's vars, its parents vars, etc."""
# NOTE: unused at the moment, but may be useful
if hostname not in self._inventory["_meta"]["hostvars"]:
return
host_vars = self._inventory["_meta"]["hostvars"][hostname]
if var_name in host_vars:
return host_vars[var_name]
parent = self._parent[hostname]
if not parent:
raise ValueError("Host must have at least one parent")
for _ in range(100):
# Search up to 100 parents
parent_vars = self._inventory[parent].get("vars", {})
if var_name in parent_vars:
return parent_vars[var_name]
parent = self._parent.get(parent)
if not parent:
# We reached the top-level parent of the host
return
raise ValueError("Reached the parent-search limit")
@property
def media_config(self):
"""Config data for preparing the USB media for network deployment."""
result = {
"deployment": self.deployment_name,
"name_servers": ["2606:4700:4700::1111", "2606:4700:4700::1001"],
"name_servers_fallback": ["2001:4860:4860::8888", "2001:4860:4860::8844"],
"datacenters": [],
}
nodes_vars = self._inventory["nodes"].get("vars", {})
journalbeat_hosts = nodes_vars.get("journalbeat_hosts", [])
result["journalbeat_hosts"] = journalbeat_hosts
journalbeat_index = nodes_vars.get("journalbeat_index", "")
result["journalbeat_index"] = journalbeat_index
deployment_dcs = set()
ic_nodes_by_dc = {}
for node in self._all_nodes_hosts:
node_vars = self.hostvars(node)
ic_host = node_vars.get("ic_host")
if not ic_host:
logging.error("No host (ic_host) defined for %s", node)
continue
# Create a list of all DCs used by this deployment
hostname_short = ic_host.split(".")[0]
dc_name = hostname_short.split("-")[0]
deployment_dcs.add(dc_name)
# Create a list of nodes sorted by DC
if dc_name not in ic_nodes_by_dc:
ic_nodes_by_dc[dc_name] = []
ic_nodes_by_dc[dc_name].append(node)
for dc_name in sorted(deployment_dcs):
dc_vars = self.data_centers[dc_name]["vars"]
dc_config = {
"name": dc_name,
"ipv6_prefix": dc_vars["ipv6_prefix"],
"ipv6_subnet": dc_vars["ipv6_subnet"],
"nodes": [],
"boundary_nodes": [],
"aux_nodes": [],
}
for node_name in ic_nodes_by_dc[dc_name]:
node_config = {}
node_vars = self.hostvars(node_name)
node_config["hostname"] = node_vars["guest_hostname"]
node_config["node_idx"] = node_vars["node_index"]
node_config["subnet_idx"] = node_vars["subnet_index"]
if node_name in self._all_nns_hosts:
node_config["subnet_type"] = "root_subnet"
elif node_name in self._all_boundary_hosts:
node_config["subnet_type"] = "boundary_subnet"
elif node_name in self._all_aux_hosts:
node_config["subnet_type"] = "aux_subnet"
else:
node_config["subnet_type"] = "app_subnet"
node_config["ipv6_address"] = node_vars["ipv6"]
use_hsm = node_vars.get("use_hsm")
if use_hsm:
node_config["use_hsm"] = use_hsm
if node_name in self._all_boundary_hosts:
dc_config["boundary_nodes"].append(node_config)
elif node_name in self._all_aux_hosts:
dc_config["aux_nodes"].append(node_config)
else:
dc_config["nodes"].append(node_config)
result["datacenters"].append(dc_config)
return result
@property
def ssh_config(self):
"""SSH configuration for the testnet hosts."""
with io.StringIO() as f:
for node in self._all_nodes_hosts:
node_vars = self.hostvars(node)
ipv6 = node_vars["ipv6"]
f.write("Host %s.testnet\n" % node)
f.write(" Hostname %s\n\n" % ipv6)
return f.getvalue()
@property
def ipv6_addresses(self):
"""Return a string with the IPv6 addresses for all deployment nodes."""
with io.StringIO() as f:
for node in self._all_nodes_hosts:
node_vars = self.hostvars(node)
f.write("%s\n" % node_vars["ipv6"])
return f.getvalue()
@property
def nodes(self):
"""Return a YAML string with all nodes and their IPv6 addresses."""
with io.StringIO() as f:
nodes = {}
for node in self._all_nodes_hosts:
node_vars = self.hostvars(node)
nodes[node] = node_vars["ipv6"]
yaml.dump(nodes, f)
return f.getvalue()
@property
def nns_nodes(self):
"""Return a YAML string with all NNS nodes and their IPv6 addresses."""
with io.StringIO() as f:
nodes = {}
for node in self._all_nns_hosts:
node_vars = self.hostvars(node)
nodes[node] = node_vars["ipv6"]
yaml.dump(nodes, f)
return f.getvalue()
# This function code is copied directly from ansible
# https://github.com/ansible-collections/community.general/blob/main/plugins/filter/random_mac.py
# note that we provide a seed value when we call this from mac_address_testnets and mac_address_mainnet
# so we get deterministic results.
def ansible_random_mac(value: str, seed: str):
"""Take string prefix, and return it completed with random bytes to get a complete 6 bytes MAC address."""
if not isinstance(value, str):
raise ValueError("Invalid value type (%s) for random_mac (%s)" % (type(value), value))
value = value.lower()
mac_items = value.split(":")
if len(mac_items) > 5:
raise ValueError("Invalid value (%s) for random_mac: 5 colon(:) separated" " items max" % value)
err = ""
for mac in mac_items:
if not mac:
err += ",empty item"
continue
if not re.match("[a-f0-9]{2}", mac):
err += ",%s not hexa byte" % mac
err = err.strip(",")
if err:
raise ValueError("Invalid value (%s) for random_mac: %s" % (value, err))
r = Random(seed)
# Generate random int between x1000000000 and xFFFFFFFFFF
v = r.randint(68719476736, 1099511627775)
# Select first n chars to complement input prefix
remain = 2 * (6 - len(mac_items))
rnd = ("%x" % v)[:remain]
return value + re.sub(r"(..)", r":\1", rnd)
def mac2eui64(mac, prefix=None):
"""Convert a MAC address to a EUI64 address or, with prefix provided, a full IPv6 address."""
# http://tools.ietf.org/html/rfc4291#section-2.5.1
eui64 = re.sub(r"[.:-]", "", mac).lower()
eui64 = eui64[0:6] + "fffe" + eui64[6:]
eui64 = hex(int(eui64[0:2], 16) ^ 2)[2:].zfill(2) + eui64[2:]
if prefix is None:
return ":".join(re.findall(r".{4}", eui64))
else:
try:
net = ipaddress.ip_network(prefix, strict=False)
euil = int("0x{0}".format(eui64), 16)
return str(net[euil])
        except ValueError:
return
def mac_address_testnets(deployment_name: str, node_index: str):
"""Calculate the MAC address for a host in a testnet."""
return ansible_random_mac("52:00", f"{deployment_name} {node_index}")
def mac_address_mainnet(phy_system_serial_number: str, deployment_name: str, guest_number: int):
"""Calculate the MAC address for a host in the mainnet."""
mac_seed = f"{phy_system_serial_number} {deployment_name} {guest_number}"
return ansible_random_mac("52:00", mac_seed)
def ipv6_address_calculate_slaac(ipv6_prefix: str, ipv6_subnet: str, mac_address: str):
"""Calculate the same IPv6 address as SLAAC does, based on the interface MAC address."""
return mac2eui64(mac_address, f"{ipv6_prefix.strip()}::{ipv6_subnet.strip()}")
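# Worked example (illustrative values only), following the RFC 4291 EUI-64 derivation:
#
#     mac_address_mainnet("SN123", "mytestnet", 1)           # deterministic "52:00:xx:xx:xx:xx" MAC
#     mac2eui64("52:00:ab:cd:ef:01", "2001:db8:1:2::/64")    # -> "2001:db8:1:2:5000:abff:fecd:ef01"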
def main():
"""Parse and process CLI arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--list", action="store_true")
parser.add_argument("--host", action="store", nargs="?")
parser.add_argument(
"--deployment",
action="store",
help="Deployment name.",
)
parser.add_argument(
"--media-json",
action="store_true",
help="Dump the deployment JSON config for the USB media generation.",
)
parser.add_argument(
"--original-inventory-path",
action="store",
help="Original path by which the inventory was invoked. Allows the user to avoid having to specify"
" the DEPLOYMENT environment variable.",
)
parser.add_argument(
"--ssh-config",
action="store_true",
help="Configure local ssh client to access testnet hosts.",
)
parser.add_argument(
"--ipv6",
action="store_true",
help="List of IPv6 addresses for all nodes.",
)
parser.add_argument(
"--nodes",
action="store_true",
help="List of nodes with their IPv6 addresses.",
)
parser.add_argument(
"--nns-nodes",
action="store_true",
help="List of NNS nodes with their IPv6 addresses.",
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose mode")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
deployment_name = args.deployment or os.environ.get("DEPLOYMENT")
if not deployment_name:
if args.original_inventory_path:
exe_path = args.original_inventory_path
else:
exe_path = pathlib.PosixPath(sys.argv[0]).absolute()
deployment_name = re.search(r"/env/(.+?)/hosts", str(exe_path))
if deployment_name:
deployment_name = deployment_name.group(1)
logging.debug(
"Setting the deployment_name based on the subdirectory: %s",
deployment_name,
)
if not deployment_name:
logging.error("--deployment is not set. Cannot continue.")
parser.print_help()
sys.exit(1)
deployment_inventory = IcDeploymentInventory(deployment_name=deployment_name)
inventory = {}
if args.media_json:
inventory = deployment_inventory.media_config
elif args.ipv6:
sys.stdout.write(deployment_inventory.ipv6_addresses)
sys.exit(0)
elif args.nodes:
sys.stdout.write(deployment_inventory.nodes)
sys.exit(0)
elif args.nns_nodes:
sys.stdout.write(deployment_inventory.nns_nodes)
sys.exit(0)
elif args.ssh_config:
cfg_path = pathlib.PosixPath().home() / ".ssh"
should_patch = True
with (cfg_path / "config").open("a+") as f:
f.seek(0)
cfg = f.read()
for line in cfg.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
# Decide what to do based on the first non-empty line.
if line == "Include ~/.ssh/config.d/*":
should_patch = False
break
if should_patch:
print("Injecting the 'Include' directive in ~/.ssh/config")
f.close()
with (cfg_path / "config").open("w+") as f:
f.write(
textwrap.dedent(
"""
Include ~/.ssh/config.d/*
Host *.testnet
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
LogLevel ERROR
User admin
"""
)
)
f.write(cfg)
                # This mode is required by the ssh client
(cfg_path / "config").chmod(mode=0o600)
cfg_path.mkdir(mode=0o700, exist_ok=True)
cfg_path = cfg_path / "config.d"
cfg_path.mkdir(mode=0o700, exist_ok=True)
cfg_path = (cfg_path / deployment_inventory.deployment_name).with_suffix(".testnet")
cfg_path.touch(mode=0o600)
with cfg_path.open(mode="w") as f:
f.write(deployment_inventory.ssh_config)
print("SSH config written to", cfg_path)
autocomplete = textwrap.dedent(
"""
_ssh()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts=$(grep '^Host' ~/.ssh/config ~/.ssh/config.d/* 2>/dev/null | grep -v '[?*]' | cut -d ' ' -f 2-)
COMPREPLY=( $(compgen -W "$opts" -- ${cur}) )
return 0
}
complete -F _ssh ssh
"""
)
if "bash" in os.environ.get("SHELL"):
cfg_path = pathlib.PosixPath().home() / ".config"
cfg_path.mkdir(exist_ok=True)
cfg_path = cfg_path / "bash-autocomplete-ic-testnets"
with cfg_path.open("w+") as f:
f.write(autocomplete)
with (pathlib.PosixPath().home() / ".bashrc").open("r+") as f:
autocomplete_line = f". {cfg_path}"
should_patch = True
for line in f.readlines():
if line.strip() == autocomplete_line:
print("Autocomplete support already enabled")
should_patch = False
break
if should_patch:
f.write("\n%s\n\n" % autocomplete_line)
print("Autocomplete support added. Please logout and login to take effect.")
print("or run to take effect in your current shell:")
print(autocomplete_line)
else:
print("Only bash is supported for autocomplete at the moment.")
print("Please reach out to us on #eng-idx to add autocomplete support for your shell.")
print("\nAll done.")
print(
"You should now be able to ssh into `{0}` testnet nodes with e.g. `ssh {0}.0.0.testnet`".format(
deployment_inventory.deployment_name
)
)
sys.exit(0)
else:
if args.list:
# Invoked with `--list`.
inventory = deployment_inventory.inventory
elif args.host:
# Invoked with `--host [hostname]`.
inventory = deployment_inventory.hostvars(args.host)
else:
# Return hostvars for "all".
inventory = deployment_inventory.hostvars()
print(json.dumps(inventory, indent=2, sort_keys=True))
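# A hedged usage sketch (the script name and testnet name below are
# placeholders): `--ssh-config` writes an SSH config fragment for the chosen
# deployment, while `--list` emits the full Ansible inventory as JSON.
#
#   ./inventory.py --deployment mytestnet --ssh-config
#   ./inventory.py --deployment mytestnet --list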
if __name__ == "__main__":
main()
| 43.006667
| 117
| 0.581228
|
310b3f1b1cab5453e1f1c019d7ae0ce576bab021
| 5,897
|
py
|
Python
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/uu.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 3
|
2015-07-29T02:31:52.000Z
|
2017-01-07T15:48:44.000Z
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/uu.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 4
|
2018-02-22T07:42:13.000Z
|
2021-12-13T10:53:09.000Z
|
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/uu.py
|
ChisdealHD/DetlasWorldLinux
|
336465a4df1a48c9a273329fc7a09d8099c4e4d5
|
[
"MIT"
] | 4
|
2015-09-09T11:54:37.000Z
|
2018-05-26T05:08:14.000Z
|
#! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
from types import StringType
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, StringType):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file)[0]
except AttributeError:
pass
in_file = open(in_file, 'rb')
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, StringType):
out_file = open(out_file, 'w')
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
str = in_file.read(45)
while len(str) > 0:
out_file.write(binascii.b2a_uu(str))
str = in_file.read(45)
out_file.write(' \nend\n')
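# For illustration, encoding a file named 'report.txt' (a placeholder) with
# mode 0644 produces output framed like this, with a near-empty line (a single
# space) before the trailing 'end':
#
#   begin 644 report.txt
#   ...uuencoded data lines...
#   end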
def decode(in_file, out_file=None, mode=None, quiet=0):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, StringType):
in_file = open(in_file)
#
# Read until a begin is encountered or we've exhausted the file
#
while 1:
hdr = in_file.readline()
if not hdr:
raise Error, 'No valid begin line found in input file'
if hdr[:5] != 'begin':
continue
hdrfields = hdr.split(" ", 2)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip()
if os.path.exists(out_file):
raise Error, 'Cannot overwrite existing file: %s' % out_file
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
opened = False
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, StringType):
fp = open(out_file, 'wb')
try:
            os.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
opened = True
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip() != 'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % str(v))
out_file.write(data)
s = in_file.readline()
if not s:
raise Error, 'Truncated input file'
if opened:
out_file.close()
def test():
"""uuencode/uudecode main program"""
import getopt
dopt = 0
topt = 0
input = sys.stdin
output = sys.stdout
ok = 1
try:
optlist, args = getopt.getopt(sys.argv[1:], 'dt')
except getopt.error:
ok = 0
if not ok or len(args) > 2:
print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
        print ' -d: Decode (instead of encode)'
print ' -t: data is text, encoded format unix-compatible text'
sys.exit(1)
for o, a in optlist:
if o == '-d': dopt = 1
if o == '-t': topt = 1
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if dopt:
if topt:
if isinstance(output, StringType):
output = open(output, 'w')
else:
print sys.argv[0], ': cannot do -t to stdout'
sys.exit(1)
decode(input, output)
else:
if topt:
if isinstance(input, StringType):
input = open(input, 'r')
else:
print sys.argv[0], ': cannot do -t from stdin'
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
| 30.086735
| 72
| 0.588096
|
a966c595108186c7e08f689c9e0851ab1950254a
| 4,106
|
py
|
Python
|
main.py
|
daniellawrence/graphite-ok
|
29aa1a28aba83f9bef06cf78a7a581dba961479f
|
[
"MIT"
] | 1
|
2016-08-13T18:27:06.000Z
|
2016-08-13T18:27:06.000Z
|
main.py
|
daniellawrence/graphite-ok
|
29aa1a28aba83f9bef06cf78a7a581dba961479f
|
[
"MIT"
] | null | null | null |
main.py
|
daniellawrence/graphite-ok
|
29aa1a28aba83f9bef06cf78a7a581dba961479f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from flask import Flask
from flask import request
from flask import render_template
import requests
from settings import *
app = Flask(__name__)
def _message(message, data, http_code=200):
return render_template("results.html", message=message, data=data), http_code
def ok(message, data):
return _message("OK: %s" % message, data, 200)
def warning(message, data):
    return _message("WARN: %s" % message, data, WARNING_HTTP_RC)
def critical(message, data):
return _message("CRIT: %s" % message, data, ERROR_HTTP_RC)
def build_graphite_url(target, within='15minutes'):
base_render_url = 'http://%s/render?from=-%s&until=-&target=' % (GRAPHITE_SERVER, within)
render_url = '%s%s&format=json' % (base_render_url, target)
print render_url
return render_url
def build_graph_url(target, within='15minutes'):
base_render_url = 'http://%s/render?from=-%s&until=-&target=' % (GRAPHITE_SERVER, within)
render_url = '%s%s&height=500&width=800&lineMode=staircase&template=plain' % (base_render_url, target)
if 'asPercent' in render_url:
render_url += '&yMin=0&yMax=100'
print render_url
return render_url
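# For illustration, with GRAPHITE_SERVER = "graphite.example.com" (a placeholder
# host) and the made-up target "servers.web01.load", build_graphite_url() yields:
#   http://graphite.example.com/render?from=-15minutes&until=-&target=servers.web01.load&format=json
# while build_graph_url() appends the image sizing/template parameters instead
# of format=json.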
def grab_graphite_data(target):
render_url = build_graphite_url(target)
graph_url = build_graph_url(target)
raw_graphite_data = requests.get(render_url)
dp_list = []
if raw_graphite_data.json() == []:
return critical("No data found", {})
all_graphite_data = raw_graphite_data.json()[0]
for dp, ts in all_graphite_data['datapoints']:
if dp is not None:
dp_list.append(dp)
dp_sum = sum(dp_list)
dp_max = max(dp_list)
dp_min = min(dp_list)
avg_dp = sum(dp_list) / len(dp_list)
return {
'avg': avg_dp, 'list': dp_list, 'max': dp_max, 'min': dp_min,
'render_url': render_url, 'graph_url': graph_url
}
def helper():
return render_template("base.html")
@app.route("/")
def index():
args = request.args
print args
if 'target' not in args:
return helper()
#return "ERROR: missing target"
target = args['target']
target_min = None
target_max = None
short_target = target.split('(')[-1].split(')')[0].split('%')[0]
if 'min' in args and args['min'] != '':
target_min = int(args['min'])
if 'max' in args and args['max'] != '':
target_max = int(args['max'])
data = grab_graphite_data(target)
if type(data) != type({}):
return data
data['target'] = target
data['short_target'] = short_target
data['target_min'] = target_min
data['target_max'] = target_max
if target_min:
data['graph_url'] = data['graph_url'] + '&target=threshold(%s, "min", "yellow")' % target_min
desc = "%s should be less than %s" % (short_target, target_min)
data['desc'] = desc
if target_max:
desc = "%s should be greater than %s" % (short_target, target_max)
data['desc'] = desc
data['graph_url'] = data['graph_url'] + '&target=threshold(%s, "max", "red")' % target_max
if target_min and target_max:
desc = "%s should be between %s and %s" % (short_target, target_min, target_max)
data['desc'] = desc
data_avg = data['avg']
if target_min and data_avg < target_min:
p_under = (target_min / data_avg) * 100 - 100
data['p_under'] = p_under
return critical("%.2f less than %s" % (data_avg, target_min), data)
if target_max and data_avg > target_max:
p_over = (target_max / data_avg) * 100
data['p_over'] = p_over
return critical("%.2f greater than %s" % (data_avg, target_max), data)
if target_min and target_max:
return ok("%.2f is within: %s-%s" % (data_avg, target_min, target_max), data)
if target_min:
return ok("%.2f is greater than min %s" % (data_avg, target_min), data)
if target_max:
return ok("%.2f is less than max %s" % (data_avg, target_max), data)
return ok("%.2f" % data_avg, data)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| 29.753623
| 106
| 0.635168
|
decb2a0acd352c93d2396b27018f2d33231d6306
| 31,326
|
py
|
Python
|
nemo/core/neural_modules.py
|
johnjdailey/NeMo
|
b2da2cb2d3382829943d1e1c579cc11381c34220
|
[
"Apache-2.0"
] | 1
|
2020-08-04T08:29:41.000Z
|
2020-08-04T08:29:41.000Z
|
nemo/core/neural_modules.py
|
johnjdailey/NeMo
|
b2da2cb2d3382829943d1e1c579cc11381c34220
|
[
"Apache-2.0"
] | null | null | null |
nemo/core/neural_modules.py
|
johnjdailey/NeMo
|
b2da2cb2d3382829943d1e1c579cc11381c34220
|
[
"Apache-2.0"
] | null | null | null |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['WeightShareTransform', 'NeuralModule', 'ModuleType']
import uuid
from abc import abstractmethod
from collections import namedtuple
from enum import Enum
from inspect import getargvalues, getfullargspec, stack
from os import path
from typing import Any, Dict, List, Optional, Set, Tuple
from ruamel.yaml import YAML
from nemo.core import NeuralModuleFactory, OperationMode
from nemo.core.neural_interface import NeuralInterface
from nemo.core.neural_types import NeuralPortNameMismatchError, NeuralType, NmTensor
from nemo.package_info import __version__ as nemo_version
from nemo.utils import logging
from nemo.utils.decorators.deprecated import deprecated
from nemo.utils.neural_graph.connection import StepModulePort
YAML = YAML(typ='safe')
class ModuleType(Enum):
""" Back-end independent module types """
module = 0
datalayer = 1
trainable = 2
loss = 3
nontrainable = 4
class WeightShareTransform(Enum):
"""When sharing parameters, what kind of transform to apply."""
SAME = 0
TRANSPOSE = 1
PretrainedModelInfo = namedtuple(
"PretrainedModleInfo", ("pretrained_model_name", "description", "parameters", "location"),
)
class NeuralModule(NeuralInterface):
"""
Abstract class that every Neural Module must inherit from.
"""
def __init__(self, name=None):
        # Initialize the interface.
super().__init__()
# Retrieve dictionary of parameters (keys, values) passed to init.
self._init_params = self.__extract_init_params()
# Get object UUID.
self._uuid = str(uuid.uuid4())
# Register module and store the generated name.
self._name = self._app_state.register_module(self, name)
# Set "module" type as default.
self._type = ModuleType.module
# Set "both" as default operation mode.
self._operation_mode = OperationMode.both
# Get default factory.
self._factory = NeuralModuleFactory.get_default_factory()
# Set module properties from factory else use defaults
self._placement = self._factory.placement
# If one needs to change that should override it manually.
# Optimization level.
self._opt_level = self._factory.optim_level
@property
def init_params(self) -> Dict[str, Any]:
"""
Property returning parameters used to instantiate the module.
Returns:
Dictionary containing parameters used to instantiate the module.
"""
return self._init_params
def __extract_init_params(self) -> Dict[str, Any]:
"""
        Retrieves the dictionary of parameters (keys, values) passed to the constructor of a class derived
(also indirectly) from the Neural Module class.
Returns:
Dictionary containing parameters passed to init().
"""
# Get names of arguments of the original module init method.
to_set_params = getfullargspec(type(self).__init__).args
to_set_params.remove("self")
# Create empty list of init params.
init_params = {}
# Get the frame "call context".
for frame in stack()[1:]:
# Get the current call arguments.
localvars = getargvalues(frame[0])
# Fill the parameters with call arguments.
for key in to_set_params:
if key in localvars.args:
init_params[key] = localvars.locals[key]
# Remove all set keys.
for key in init_params.keys():
if key in to_set_params:
to_set_params.remove(key)
# Check if we have set everything.
if len(to_set_params) == 0:
break
# Make sure that we collected ALL (and ONLY) the signature params - if not, then there is a BUG!
if len(to_set_params) != 0:
raise ValueError(
"Could not collect all the signature params! "
f"Please file a bug on GitHub with the current stack trace so that it can be reproduced."
)
# print("! init_params of {}: {}\n".format(type(self).__name__, init_params))
# Return parameters.
return init_params
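    # For illustration (hypothetical subclass, not part of NeMo): constructing
    # `MyModule(dim=4, name="m1")`, where MyModule.__init__(self, dim, name=None)
    # calls super().__init__(), makes __extract_init_params() walk the call
    # stack and recover {"dim": 4, "name": "m1"} as the module's init_params.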
def __validate_params(self, params: Dict[str, Any]) -> bool:
"""
Checks whether dictionary contains parameters being primitive types (string, int, float etc.)
or (lists of)+ primitive types.
Args:
params: dictionary of parameters.
Returns:
True if all parameters were ok, False otherwise.
"""
ok = True
# Iterate over parameters and check them one by one.
for key, variable in params.items():
if not self.__is_of_allowed_type(variable):
logging.warning(
"Parameter '{}' contains a variable '{}' of type '{}' which is not allowed.".format(
key, variable, type(variable)
)
)
ok = False
# Return the result.
return ok
def __is_of_allowed_type(self, var) -> bool:
"""
A recursive function that checks if a given variable is of allowed type.
Args:
pretrained_model_name (str): name of pretrained model to use in order.
Returns:
True if all parameters were ok, False otherwise.
"""
# Special case: None is also allowed.
if var is None:
return True
var_type = type(var)
# If this is list - check its elements.
if var_type == list:
for list_var in var:
if not self.__is_of_allowed_type(list_var):
return False
# If this is dict - check its elements.
elif var_type == dict:
for _, dict_var in var.items():
if not self.__is_of_allowed_type(dict_var):
return False
elif var_type not in (str, int, float, bool):
return False
# Well, seems that everything is ok.
return True
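    # For illustration: an init_params dict such as
    # {"dim": 4, "rate": 0.1, "layers": [16, 16], "act": "relu"} passes
    # __validate_params(), while {"model": SomeTorchModule()} does not, because
    # an arbitrary object is not a (list/dict of) primitive type.
    # (SomeTorchModule is a hypothetical class used only for this example.)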
def export_to_config(self, config_file: str):
"""
A function that exports module "configuration" (i.e. init parameters) to a YAML file.
Args:
config_file: path (absolute or relative) and name of the config file (YML)
Raises:
ValueError: An error occurred and parameters coudn't be exported.
"""
        # Create an absolute path.
abs_path_file = path.expanduser(config_file)
# Serialize the module.
to_export = self.serialize()
# All parameters are ok, let's export.
with open(abs_path_file, 'w') as outfile:
YAML.dump(to_export, outfile)
logging.info(
"Configuration of module `{}` ({}) exported to '{}'".format(self.name, type(self).__name__, abs_path_file)
)
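    # A hedged usage sketch (module and file names are placeholders):
    #
    #   encoder = MyEncoder(dim=4, name="enc0")
    #   encoder.export_to_config("enc0.yaml")
    #   restored = MyEncoder.import_from_config("enc0.yaml", overwrite_params={"dim": 8})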
def serialize(self) -> Dict[str, Any]:
"""
A method serializing the whole Neural module (into a dictionary).
Returns:
Dictionary containing a "serialized" module.
"""
# Create a dictionary representing the serialized object.
serialized_module = {}
# Add "header" with module "specification".
serialized_module["header"] = self.__serialize_header()
# Add init parameters.
serialized_module["init_params"] = self._serialize_configuration()
# Return the dictionary.
return serialized_module
def __serialize_header(self) -> Dict[str, Any]:
"""
A protected method that creates a header stored later in the configuration file.
Returns:
Dictionary containing a header with module specification.
"""
# Get module "full specification".
module_full_spec = str(self.__module__) + "." + str(self.__class__.__qualname__)
module_class_name = type(self).__name__
# print(module_full_spec)
# Check whether module belongs to a collection.
spec_list = module_full_spec.split(".")
# Do not check Neural Modules from unit tests.
if spec_list[0] == "tests":
# Set collection variables.
collection_type = "tests"
collection_version = None
else:
# Check if component belongs to any collection
if len(spec_list) < 3 or (spec_list[0] != "nemo" and spec_list[1] != "collection"):
logging.warning(
"Module `{}` does not belong to any collection. This won't be allowed in the next release.".format(
module_class_name
)
)
collection_type = "unknown"
collection_version = None
else:
# Ok, set collection.
collection_type = spec_list[2]
collection_version = None
# TODO: to be SET!
# print(getattr("nemo.collections.nlp", __version__))
# Create a "header" with module "specification".
header = {
"nemo_core_version": nemo_version,
"collection_type": collection_type,
"collection_version": collection_version,
# "class": module_class_name, # Operating only on full_spec now.
"full_spec": module_full_spec,
}
return header
def _serialize_configuration(self) -> Dict[str, Any]:
"""
        A function that serializes the module "configuration" (i.e. init parameters) to a dictionary.
        .. note::
            These functions should be overloaded when writing a custom module import/export.
Returns:
A "serialized" dictionary with module configuration.
Raises:
A ValueError exception in case then parameters coudn't be exported.
"""
# Check if generic export will work.
if not self.__validate_params(self._init_params):
raise ValueError(
"Generic configuration export enables to use of parameters of primitive types (string, int, float) "
F"or (lists of/dicts of) primitive types. Please implement your own custom `export_to_config()` and "
F"`import_from_config()` methods for your custom Module class."
)
# In this case configuration = init parameters.
return self._init_params
@classmethod
def import_from_config(
cls, config_file: str, section_name: str = None, name: str = None, overwrite_params: Dict = {}
) -> 'NeuralModule':
"""
Class method importing the configuration file.
Raises an ImportError exception when config file is invalid or
incompatible (when called from a particular class).
Args:
config_file: path (absolute or relative) and name of the config file (YML)
section_name: section in the configuration file storing module configuration (optional, DEFAULT: None)
name: name of the module that will overwrite the name in the `init_params` (optional, DEFAULT: None)
overwrite_params: Dictionary containing parameters that will be added to or overwrite (!)
the default init parameters loaded from the configuration file (the module "init_params" section).
Returns:
Instance of the created NeuralModule object.
"""
logging.info("Loading configuration of a new Neural Module from the `{}` file".format(config_file))
# Validate the content of the configuration file (its header).
loaded_config = cls.__validate_config_file(config_file, section_name)
# "Deserialize" the module.
obj = cls.deserialize(loaded_config, name, overwrite_params)
# Return the new module.
return obj
@classmethod
def __validate_config_file(cls, config_file: str, section_name: str = None) -> Dict[str, Any]:
"""
Class method validating whether the config file has a proper content (sections, specification etc.).
Raises an ImportError exception when config file is invalid or
incompatible (when called from a particular class).
Args:
config_file: path (absolute or relative) and name of the config file (YML)
section_name: section in the configuration file storing module configuration (optional, DEFAULT: None)
Returns:
A loaded configuration file (dictionary).
"""
        # Create an absolute path.
abs_path_file = path.expanduser(config_file)
# Open the config file.
with open(abs_path_file, 'r') as stream:
loaded_config = YAML.load(stream)
# Check section.
if section_name is not None:
if section_name not in loaded_config:
raise ImportError(
"The loaded config `{}` doesn't contain the indicated `{}` section".format(
config_file, section_name
)
)
# Section exists - use only it for configuration.
loaded_config = loaded_config[section_name]
# Make sure that the config is valid.
if "header" not in loaded_config:
raise ImportError("The loaded config `{}` doesn't contain the `header` section".format(config_file))
if "init_params" not in loaded_config:
raise ImportError("The loaded config `{}` doesn't contain the `init_params` section".format(config_file))
# Parse the "full specification".
spec_list = loaded_config["header"]["full_spec"].split(".")
# Check if config contains data of a compatible class.
if cls.__name__ != "NeuralModule" and spec_list[-1] != cls.__name__:
txt = "The loaded file `{}` contains configuration of ".format(config_file)
txt = txt + "`{}` thus cannot be used for instantiation of an object of type `{}`".format(
spec_list[-1], cls.__name__
)
raise ImportError(txt)
# Success - return configuration.
return loaded_config
@classmethod
def deserialize(
cls, configuration: Dict[str, Any], name: str = None, overwrite_params: Dict[str, Any] = {}
) -> 'NeuralModule':
"""
Class method instantianting the neural module object based on the configuration (dictionary).
Args:
configuration: Dictionary containing proper "header" and "init_params" sections.
name: name of the module that will overwrite the name in the `init_params` (optional, DEFAULT: None)
overwrite_params: Dictionary containing parameters that will be added to or overwrite (!)
the default init parameters loaded from the configuration file (the module "init_params" section).
Returns:
Instance of the created NeuralModule object.
"""
# Deserialize header - get object class.
module_class = cls.__deserialize_header(configuration["header"])
# Update parameters with additional ones.
configuration["init_params"].update(overwrite_params)
# Override module name in init_params using the logic:
# * section_name if not none overrides init_params.name first (skipped for now, TOTHINK!)
# * name (if None) overrides init_params.name
if name is not None:
configuration["init_params"]["name"] = name
# Get init parameters.
init_params = cls._deserialize_configuration(configuration["init_params"])
# Create the module instance.
new_module = module_class(**init_params)
logging.info(
"Instantiated a new Neural Module named `{}` of type `{}`".format(
new_module.name, type(new_module).__name__
)
)
# Return the module instance.
return new_module
@classmethod
def __deserialize_header(cls, serialized_header: Dict[str, Any]):
"""
Method deserializes the header and extracts the module class.
Args:
serialized_header: Dictionary containing module header.
Returns:
Class of the module to be created.
"""
# Parse the "full specification".
spec_list = serialized_header["full_spec"].split(".")
# Get module class from the "full specification".
mod_obj = __import__(spec_list[0])
for spec in spec_list[1:]:
mod_obj = getattr(mod_obj, spec)
# Return "class".
return mod_obj
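    # For illustration (hypothetical spec): a full_spec of
    # "nemo.collections.asr.SomeModule" is resolved by importing "nemo" and then
    # walking getattr(..., "collections"), getattr(..., "asr"),
    # getattr(..., "SomeModule"), yielding the class object to instantiate.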
@classmethod
def _deserialize_configuration(cls, serialized_init_params: Dict[str, Any]):
"""
        A function that deserializes the module "configuration" (i.e. init parameters).
        .. note::
            These functions should be overloaded when writing a custom module import/export.
Args:
serialized_init_params: List of init parameters loaded from the file.
Returns:
A "deserialized" list with init parameters.
"""
# In this case configuration = init parameters.
return serialized_init_params
@property
@abstractmethod
def input_ports(self) -> Dict[str, NeuralType]:
"""
Returns definitions of module input ports
Returns:
A dictionary containing module's input ports (names, NeuralTypes) mapping.
"""
@property
@abstractmethod
def output_ports(self) -> Dict[str, NeuralType]:
"""
Returns definitions of module output ports
Returns:
A dictionary containing module's output ports (names, NeuralTypes) mapping.
"""
@property
def _disabled_deployment_input_ports(self) -> Set[str]:
"""Returns names of input ports that will not be included in an export
Returns:
A (set) of module's input port names that are not exportable
"""
return set([])
@property
def _disabled_deployment_output_ports(self) -> Set[str]:
"""Returns names of output ports that will not be included in an export
Returns:
A (set) of module's output port names that are not exportable
"""
return set([])
def _prepare_for_deployment(self) -> None:
"""Patch the module if required to prepare for deployment
"""
return
@property
def operation_mode(self):
""" Returns the operation mode. """
return self._operation_mode
@property
def type(self):
""" Returns the type of module. """
return self._type
@operation_mode.setter
def operation_mode(self, operation_mode: OperationMode):
""" Sets the operation mode. """
self._operation_mode = operation_mode
@staticmethod
def pretrained_storage():
return ''
def __call__(self, **kwargs):
"""This method allows objects to be called with their port names
Args:
kwargs: Input ports and their values. For example:
...
mymodule1 = Subclass1_of_NeuralModule(...)
mymodule2 = Subclass2_of_NeuralModule(...)
...
out_port1, out_port2 = mymodule1(input_port1=value1,
input_port2=value2,
input_port3=value3)
out_port11 = mymodule2(input_port1=out_port2)
...
Returns:
NmTensor object or tuple of NmTensor objects
"""
# print(" Neural Module:__call__")
# Set the operation mode of the outer graph.
self.operation_mode = self._app_state.active_graph.operation_mode
# The input and output ports definitions can potentially depend on the operation mode!
# Record the operation (i.e. add a single module).
step_number = self._app_state.active_graph.record_step(self)
###### PROCESS INPUTS. ######
# Iterate through all passed parameters.
for port_name, port_content in kwargs.items():
# Make sure that passed arguments corresponds to one of the input port names.
if port_name not in self.input_ports.keys():
raise NeuralPortNameMismatchError(port_name)
# At that point the input can be one of three types:
# * NeuralGraph -> bind port using the default name and type.
# * GraphInput -> check definition, if ok bind port.
# * NmTensor -> check definition, add self as a "consumer" of a tensor (produced by other module).
# Check what was actually passed.
if type(port_content).__name__ == "NeuralGraph":
# Make sure that port_content is the currently active graph!
if port_content is not self._app_state.active_graph:
raise ConnectionError("Ports can be bound only by passing the active graph object!")
# Create an alias so the logic will be more clear.
active_graph = port_content
# This case: we are nesting one graph into another and must bind input port of one graph in another!
# So generally we must "copy" the of thus module to graog (the inverted logic!).
# Copy the port "definition" (i.e. is NeuralType) using the same port name.
active_graph.inputs[port_name] = self.input_ports[port_name]
# Bind the neural graph input port, i.e. remember that a given graph port should pass data
# to THIS module-port (when it finally will be connected).
active_graph.inputs[port_name].bind(StepModulePort(step_number, self.name, port_name))
# Please note that there are no "consumers" here - this is a "pure binding".
elif type(port_content).__name__ == "GraphInput":
# Check if GraphInput belongs to the active graph !
own_port = False
for gcontent in self._app_state.active_graph.inputs.values():
if gcontent is port_content:
own_port = True
break
if not own_port:
raise NeuralPortNameMismatchError(port_name)
# Compare input port definition with the received definition.
self.input_ports[port_name].compare_and_raise_error(
self.__class__.__name__, port_name, port_content.ntype
)
# Bind the neural graph input port, i.e. remember that a given graph port should pass data
# to THIS module-port (when it finally will be connected).
port_content.bind(StepModulePort(step_number, self.name, port_name))
# Please note that there are no "consumers" here - this is a "pure binding".
elif type(port_content) is NmTensor:
# Compare input port definition with the received definition.
self.input_ports[port_name].compare_and_raise_error(self.__class__.__name__, port_name, port_content)
# Ok, the goal here is to actually "connect": add self (module) as "consumer" to the input tensor.
port_content.add_consumer(StepModulePort(step_number, self.name, port_name))
else:
raise TypeError(
"Input '{}' must be of one of three types: NeuralGraph, GraphInput or NmTensor".format(port_name)
)
###### PRODUCE OUTPUTS. ######
output_port_defs = self.output_ports
# Create output tensors.
if len(output_port_defs) == 1:
# Get port name and type.
out_name = list(output_port_defs)[0]
out_type = output_port_defs[out_name]
# Create a single returned tensor.
results = NmTensor(producer=self, producer_args=kwargs, output_port_name=out_name, ntype=out_type,)
# Bind the "default" output ports.
self._app_state.active_graph.bind_outputs(results)
else:
# Create output tensors.
output_tensors = []
for out_name, out_type in output_port_defs.items():
output_tensors.append(
NmTensor(producer=self, producer_args=kwargs, output_port_name=out_name, ntype=out_type,)
)
# Create a named tuple type enabling to access outputs by attributes (e.g. out.x).
output_class_name = f'{self.__class__.__name__}Output'
result_type = namedtuple(typename=output_class_name, field_names=output_port_defs.keys())
# Create the returned tuple object.
results = result_type(*output_tensors)
# Bind the output tensors.
self._app_state.active_graph.bind_outputs(output_tensors)
# Return the results.
return results
def __str__(self):
return self.__class__.__name__
@abstractmethod
def get_weights(self) -> Optional[Dict[(str, bool)]]:
"""Returns NeuralModule's weights copy.
Returns:
Dictionary of name -> (weights, trainable)"""
pass
@abstractmethod
def set_weights(
self,
name2weight: Dict[(str, Tuple[str, bool])],
name2name_and_transform: Dict[(str, Tuple[str, WeightShareTransform])] = None,
):
"""Sets weight from given values. For every named weight in
name2weight,
if weight with the same name is found in the model, it will be set to
found value.
WARNING: This will NOT tie weights. It will copy values.
If ``name2name_and_transform`` is provided then if will set weights
using
name mapping and transform. For example, suppose ``objec1.X = 3x5
weight``.
Then, if ``name2name_and_transform['X']=('Y',
WeightShareTransform.TRANSPOSE)``
and ``Y`` is 5x3 weight and ``name2weight['Y']=Y. Then:
``object1.set_weights(name2weight, name2name_and_transform)`` will
set object1.X=transpose(Y).
Args:
name2weight (dict): dictionary of name to (weight, trainable).
Typically this is output of get_weights method.
name2name_and_transform: mapping from name -> (name, transform)
"""
pass
@staticmethod
def list_pretrained_models() -> Optional[List[PretrainedModelInfo]]:
"""List all available pre-trained models (e.g. weights) for this NM.
Returns:
A list of PretrainedModelInfo tuples.
The pretrained_model_name field of the tuple can be used to
retrieve pre-trained model's weights (pass it as
pretrained_model_name argument to the module's constructor)
"""
return None
def get_config_dict_and_checkpoint(self, pretrained_model_name):
"""WARNING: This part is work in progress"""
return None
@abstractmethod
def tie_weights_with(
self,
module,
weight_names=List[str],
name2name_and_transform: Dict[(str, Tuple[str, WeightShareTransform])] = None,
):
"""Ties weights between self and module. For every weight name in
weight_names, if weight with the same name is found in self, it will
be tied
with a same weight from ``module``.
WARNING: Once weights are tied, updates to one weights's weights
will affect
other module's weights.
If ``name2name_and_transform`` is provided then if will set weights
using
name mapping and transform. For example, suppose ``objec1.X = 3x5
weights``
and ``object2.Y = 5x3 weights``. Then these weights can be tied like
this:
.. code-block:: python
object1.tie_weights_with(object2, weight_names=['X'],
name2name_and_transform =
{ 'X': ('Y', WeightShareTransform.TRANSPOSE)})
Args:
module: with which module to tie weights
weight_names (List[str]): list of self weights' names
name2name_and_transform: mapping from name -> (name, transform)
"""
pass
def is_trainable(self) -> bool:
"""
Checks if NeuralModule is trainable.
A NeuralModule is trainable IFF it contains at least one trainable
weight
Returns:
True if module has trainable weights, False otherwise
"""
weights = self.get_weights()
if weights is None:
return False
for name, w in weights.items():
if w[1]:
return True
return False
@abstractmethod
def save_to(self, path: str):
"""Save module state to file.
Args:
            path (string): path to the file to save to.
"""
pass
@abstractmethod
def restore_from(self, path: str):
"""Restore module's state from file.
Args:
path (string): path to where to restore from.
"""
pass
@abstractmethod
def freeze(self, weights: Set[str] = None):
"""Freeze weights
Args:
weights (set): set of weight names to freeze
                If None, all weights are frozen.
"""
pass
@abstractmethod
def unfreeze(self, weights: Set[str] = None):
"""Unfreeze weights
Args:
weights (set): set of weight names to unfreeze
                If None, all weights are unfrozen.
"""
pass
@property
def placement(self):
"""Module's placement. Currently CPU or GPU.
DataParallel and ModelParallel will come later.
Returns:
(DeviceType) Device where NM's weights are located
"""
return self._placement
@property
@deprecated(version=0.11)
def local_parameters(self) -> Optional[Dict]:
"""Get module's parameters
Returns:
module's parameters
"""
return self._init_params
# return self._local_parameters
@property
def unique_instance_id(self):
"""A unique instance id for this object
Returns:
            A unique UUID which can be used to identify this object
"""
return self._uuid
@property
def factory(self):
""" Neural module factory which created this module
Returns: NeuralModuleFactory instance or None
"""
return self._factory
@property
@abstractmethod
def num_weights(self):
"""Number of module's weights
"""
pass
| 36.17321
| 119
| 0.61741
|
ca660f639511102b8e987d5985fcf53d4fa5fd16
| 4,893
|
py
|
Python
|
minesweeper/minesweeper_ai.py
|
kindjie/minesweeper
|
996f64187cdfc8d68db40c58d713b7b0ffd8d237
|
[
"MIT"
] | 1
|
2018-04-14T00:40:41.000Z
|
2018-04-14T00:40:41.000Z
|
minesweeper/minesweeper_ai.py
|
kindjie/minesweeper
|
996f64187cdfc8d68db40c58d713b7b0ffd8d237
|
[
"MIT"
] | null | null | null |
minesweeper/minesweeper_ai.py
|
kindjie/minesweeper
|
996f64187cdfc8d68db40c58d713b7b0ffd8d237
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import time
from minesweeper.minesweeper import Command, CmdType, BoardState
class MineSweeperAI(object):
NUM_GAMES = 20
THINK_DELAY = 0.1 # Seconds
GAME_OVER_DELAY = 1.0 # Seconds
_MIN_RISK = 0.0
_MAX_RISK = 1.0
_NUM_NEIGHBOURS = 8
CellRisk = namedtuple('CellRisk', ['risk', 'x', 'y'])
def __init__(self, game):
self._game = game
self._cache = {}
def __iter__(self):
return self
def next(self):
self._clear_cache()
time.sleep(self.THINK_DELAY)
if self._game.game_over:
return self._handle_game_over()
best_flag_move, best_reveal_move = self._find_best_moves()
if best_flag_move.risk >= self._MAX_RISK:
return Command(CmdType.TOGGLE_FLAG, best_flag_move.x, best_flag_move.y)
else:
# Might need to guess if we aren't certain about a flag.
return Command(CmdType.REVEAL, best_reveal_move.x, best_reveal_move.y)
__next__ = next
def _moves(self, cells):
for x, y in cells:
risk = self._calc_risk(x, y)
yield risk
            if risk.risk == self._MIN_RISK or risk.risk == self._MAX_RISK:
# Found candidate move, so we can stop.
return
def _find_best_moves(self):
hidden_cells = [(x, y) for x, y, state in self._game.board if state == BoardState.HIDDEN]
risks = sorted(self._moves(hidden_cells), key=lambda r: r.risk)
best_reveal = risks[0]
best_flag = risks[-1]
return best_flag, best_reveal
def _calc_risk(self, x, y):
if self._is_definite_safe(x, y):
return self.CellRisk(self._MIN_RISK, x, y)
if self._is_definite_mine(x, y):
return self.CellRisk(self._MAX_RISK, x, y)
general_prob = float(self._game.board.num_mines - self._game.num_flags) / \
float(self._game.board.num_hidden - self._game.num_flags)
def prob_mine(ax, ay):
val = self._game.board.get(ax, ay)
if val is None:
# Prefer edges since they have a better chance of revealing more cells.
return general_prob / 2.0
if val > self._NUM_NEIGHBOURS:
# No extra information from this cell.
return general_prob
# Use number of expected versus found mines to estimate likelihood.
hidden = self._count_neighbours_state(ax, ay, BoardState.HIDDEN)
flags = self._count_neighbours_state(ax, ay, BoardState.FLAG)
return float(val - flags) / float(hidden)
# Calculating actual probability takes exponential time.
# Let's average probabilities to get a rough estimate of risk.
probabilities = [prob_mine(ax, ay) for ax, ay in self._game.board._adjacent_pos(x, y)]
return self.CellRisk(sum(probabilities) / len(probabilities), x, y)
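    # Worked example of the heuristic above (numbers are purely illustrative):
    # if a hidden cell's neighbours yield per-neighbour estimates of 0.5 (a "1"
    # with two hidden and zero flagged neighbours), 1/3 (a "2" with three hidden
    # and one flagged neighbour) and general_prob / 2 (an off-board neighbour),
    # its risk is the mean of those three values -- a rough score, not a true
    # mine probability.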
def _handle_game_over(self):
time.sleep(self.GAME_OVER_DELAY)
if self._game.num_games >= self.NUM_GAMES:
raise StopIteration()
else:
return Command(CmdType.NONE, 0, 0)
def _count_neighbours_state(self, x, y, state):
if not self._in_cache('count', (x, y, state)):
self._set_cache('count', (x, y, state),
sum(1 for ax, ay in self._game.board._adjacent_pos(x, y) \
if self._game.board.get(ax, ay) == state))
return self._get_cache('count', (x, y, state))
def _get_neighbour_values(self, x, y):
return [self._game.board.get(ax, ay) for ax, ay in self._game.board._adjacent_pos(x, y)]
def _is_definite_safe(self, x, y):
return any(self._count_neighbours_state(ax, ay, BoardState.FLAG) == \
self._game.board.get(ax, ay) for ax, ay in self._game.board._adjacent_pos(x, y))
def _is_definite_mine(self, x, y):
neighbours = self._game.board._adjacent_pos(x, y)
for n in neighbours:
num_flagged_neighbours_of_n = self._count_neighbours_state(*n, state=BoardState.FLAG)
num_hidden_neighbours_of_n = self._count_neighbours_state(*n, state=BoardState.HIDDEN)
state = self._game.board.get(*n)
if state is None:
continue
if state <= self._NUM_NEIGHBOURS and num_hidden_neighbours_of_n <= \
state - num_flagged_neighbours_of_n:
return True
return False
def _set_cache(self, method, params, value):
self._cache[(method, params)] = value
def _in_cache(self, method, params):
return (method, params) in self._cache
def _get_cache(self, method, params):
return self._cache[(method, params)]
def _clear_cache(self):
self._cache = {}
| 37.068182
| 99
| 0.610464
|
8fa714eb7d308587db21ec76b41f3bbf6b0306d5
| 289
|
py
|
Python
|
modules/mloader/constants.py
|
Dastan21/ScantradFranceAPI
|
e145a51d21123c518bd1bda90277bc06fff811b0
|
[
"Apache-2.0"
] | null | null | null |
modules/mloader/constants.py
|
Dastan21/ScantradFranceAPI
|
e145a51d21123c518bd1bda90277bc06fff811b0
|
[
"Apache-2.0"
] | null | null | null |
modules/mloader/constants.py
|
Dastan21/ScantradFranceAPI
|
e145a51d21123c518bd1bda90277bc06fff811b0
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
class Language(Enum):
eng = 0
spa = 1
fre = 2
ind = 3
por = 4
rus = 5
tha = 6
class ChapterType(Enum):
latest = 0
sequence = 1
nosequence = 2
class PageType(Enum):
single = 0
left = 1
right = 2
double = 3
| 11.56
| 24
| 0.525952
|
f94c01f90d9cfbb8e9216cf28a462d6c60cbcaf0
| 2,337
|
py
|
Python
|
src/api2db/install/clear_lab.py
|
TristenHarr/api2db
|
8c8b14280441f5153ff146c23359a0eb91022ddb
|
[
"MIT"
] | 45
|
2021-05-05T01:34:20.000Z
|
2021-11-02T08:41:34.000Z
|
src/api2db/install/clear_lab.py
|
TristenHarr/api2db
|
8c8b14280441f5153ff146c23359a0eb91022ddb
|
[
"MIT"
] | 1
|
2021-06-02T11:43:33.000Z
|
2021-06-02T20:32:29.000Z
|
src/api2db/install/clear_lab.py
|
TristenHarr/api2db
|
8c8b14280441f5153ff146c23359a0eb91022ddb
|
[
"MIT"
] | 3
|
2021-05-08T21:49:24.000Z
|
2021-05-13T23:14:09.000Z
|
import os
def clab():
"""
This shell command is used to clear a lab.
::
project_dir-----/
|
apis-------/
| |- __init__.py
| |- FooCollector.py
| |- BarCollector.py
|
AUTH-------/
| |- bigquery_auth_template.json
| |- omnisci_auth_template.json
| |- sql_auth_template.json
|
CACHE/
|
STORE/
|
laboratory-/
| |- lab.py EDIT THIS FILE!
|
helpers.py
|
main.py
**Shell Command:** ``path/to/project_dir> clab``
::
project_dir-----/
|
apis-----/
| |- __init__.py
| |- FooCollector.py
| |- BarCollector.py
|
AUTH-----/
| |- bigquery_auth_template.json
| |- omnisci_auth_template.json
| |- sql_auth_template.json
|
CACHE/
|
STORE/
|
helpers.py
|
main.py
"""
lab_dir_path = os.path.join(os.getcwd(), "laboratory")
if not os.path.isdir(lab_dir_path):
print("No lab exists... skipping")
return
clear = input(f"WARNING: ALL ITEMS IN DIRECTORY {lab_dir_path} WILL BE DELETED. Continue? (y/n) ")
if clear != "y":
return
sure = input(f"Are you sure you want to delete all items from {lab_dir_path}? This cannot be undone. (y/n) ")
if sure != "y":
return
for root, dirs, files in os.walk(lab_dir_path, False):
for f in files:
os.remove(os.path.join(root, f))
for d in dirs:
os.rmdir(os.path.join(root, d))
os.rmdir(lab_dir_path)
| 31.16
| 113
| 0.350021
|
feaf4a2c17fb13f6795ea73e997c65ca24a32a20
| 3,642
|
py
|
Python
|
app/app/settings.py
|
seObando19/BlogPost
|
2cf6b53bc202da51fd891c2596a651a39b54e41a
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
app/app/settings.py
|
seObando19/BlogPost
|
2cf6b53bc202da51fd891c2596a651a39b54e41a
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
app/app/settings.py
|
seObando19/BlogPost
|
2cf6b53bc202da51fd891c2596a651a39b54e41a
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
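# For reference, a minimal .env file for these settings might look like the
# following (values are placeholders, not real credentials):
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   POSTGRESQL_NAME=blogpost
#   POSTGRESQL_USER=postgres
#   POSTGRESQL_PASS=postgres
#   POSTGRESQL_HOST=127.0.0.1
#   POSTGRESQL_PORT=5432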
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'indexTemplate.apps.IndextemplateConfig',
'users.apps.UsersConfig',
'post.apps.PostConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('POSTGRESQL_NAME'),
'USER': env('POSTGRESQL_USER'),
'PASSWORD': env('POSTGRESQL_PASS'),
'HOST': env('POSTGRESQL_HOST'),
'PORT': env('POSTGRESQL_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = 'index'
| 25.468531
| 91
| 0.695222
|
82f1ffc25463e948452201200c957bdbf77c8961
| 12,110
|
py
|
Python
|
bootstrap/win/win_tools.py
|
fanbojie/depot_tools
|
355e97e300e8baceae8353287ad59b915dbb8196
|
[
"BSD-3-Clause"
] | 1
|
2021-09-10T06:10:02.000Z
|
2021-09-10T06:10:02.000Z
|
bootstrap/win/win_tools.py
|
fanbojie/depot_tools
|
355e97e300e8baceae8353287ad59b915dbb8196
|
[
"BSD-3-Clause"
] | null | null | null |
bootstrap/win/win_tools.py
|
fanbojie/depot_tools
|
355e97e300e8baceae8353287ad59b915dbb8196
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import contextlib
import fnmatch
import hashlib
import logging
import os
import platform
import posixpath
import shutil
import string
import subprocess
import sys
import tempfile
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..'))
DEVNULL = open(os.devnull, 'w')
BAT_EXT = '.bat' if sys.platform.startswith('win') else ''
# Top-level stubs to generate that fall through to executables within the Git
# directory.
STUBS = {
'git.bat': 'cmd\\git.exe',
'gitk.bat': 'cmd\\gitk.exe',
'ssh.bat': 'usr\\bin\\ssh.exe',
'ssh-keygen.bat': 'usr\\bin\\ssh-keygen.exe',
}
# Accumulated template parameters for generated stubs.
class Template(collections.namedtuple('Template', (
'PYTHON_RELDIR', 'PYTHON_BIN_RELDIR', 'PYTHON_BIN_RELDIR_UNIX',
'PYTHON3_BIN_RELDIR', 'PYTHON3_BIN_RELDIR_UNIX', 'GIT_BIN_RELDIR',
'GIT_BIN_RELDIR_UNIX', 'GIT_PROGRAM',
))):
@classmethod
def empty(cls):
return cls(**{k: None for k in cls._fields})
def maybe_install(self, name, dst_path):
"""Installs template |name| to |dst_path| if it has changed.
This loads the template |name| from THIS_DIR, resolves template parameters,
and installs it to |dst_path|. See `maybe_update` for more information.
Args:
name (str): The name of the template to install.
dst_path (str): The destination filesystem path.
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
template_path = os.path.join(THIS_DIR, name)
with open(template_path, 'r') as fd:
t = string.Template(fd.read())
return maybe_update(t.safe_substitute(self._asdict()), dst_path)
def maybe_update(content, dst_path):
"""Writes |content| to |dst_path| if |dst_path| does not already match.
This function will ensure that there is a file at |dst_path| containing
|content|. If |dst_path| already exists and contains |content|, no operation
will be performed, preserving filesystem modification times and avoiding
potential write contention.
Args:
content (str): The file content.
dst_path (str): The destination filesystem path.
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
# If the path already exists and matches the new content, refrain from writing
# a new one.
if os.path.exists(dst_path):
with open(dst_path, 'r') as fd:
if fd.read() == content:
return False
logging.debug('Updating %r', dst_path)
with open(dst_path, 'w') as fd:
fd.write(content)
return True
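# A hedged usage sketch (path and content are placeholders): repeated calls with
# identical content leave the destination untouched, which is the point of the
# helper.
#
#   changed = maybe_update('# generated\n', os.path.join(ROOT_DIR, 'example.cfg'))
#   if changed:
#     logging.info('example.cfg was (re)written')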
def maybe_copy(src_path, dst_path):
"""Writes the content of |src_path| to |dst_path| if needed.
See `maybe_update` for more information.
Args:
src_path (str): The content source filesystem path.
dst_path (str): The destination filesystem path.
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
with open(src_path, 'r') as fd:
content = fd.read()
return maybe_update(content, dst_path)
def call_if_outdated(stamp_path, stamp_version, fn):
"""Invokes |fn| if the stamp at |stamp_path| doesn't match |stamp_version|.
This can be used to keep a filesystem record of whether an operation has been
performed. The record is stored at |stamp_path|. To invalidate a record,
change the value of |stamp_version|.
After |fn| completes successfully, |stamp_path| will be updated to match
|stamp_version|, preventing the same update from happening in the future.
Args:
stamp_path (str): The filesystem path of the stamp file.
stamp_version (str): The desired stamp version.
fn (callable): A callable to invoke if the current stamp version doesn't
match |stamp_version|.
Returns (bool): True if an update occurred.
"""
stamp_version = stamp_version.strip()
if os.path.isfile(stamp_path):
with open(stamp_path, 'r') as fd:
current_version = fd.read().strip()
if current_version == stamp_version:
return False
fn()
with open(stamp_path, 'w') as fd:
fd.write(stamp_version)
return True
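# A hedged usage sketch (stamp name and version are placeholders): the callable
# runs only when the stamp file is missing or holds a different version string.
#
#   call_if_outdated(
#       os.path.join(ROOT_DIR, '.example_stamp'),
#       '3',
#       lambda: logging.info('performing one-time setup'))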
def _in_use(path):
"""Checks if a Windows file is in use.
When Windows is using an executable, it prevents other writers from
modifying or deleting that executable. We can safely test for an in-use
file by opening it in write mode and checking whether or not there was
an error.
Returns (bool): True if the file was in use, False if not.
"""
try:
with open(path, 'r+'):
return False
except IOError:
return True
def _toolchain_in_use(toolchain_path):
"""Returns (bool): True if a toolchain rooted at |path| is in use.
"""
# Look for Python files that may be in use.
for python_dir in (
os.path.join(toolchain_path, 'python', 'bin'), # CIPD
toolchain_path, # Legacy ZIP distributions.
):
for component in (
os.path.join(python_dir, 'python.exe'),
os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
):
if os.path.isfile(component) and _in_use(component):
return True
  # Look for Python 3 files that may be in use.
python_dir = os.path.join(toolchain_path, 'python3', 'bin')
for component in (
os.path.join(python_dir, 'python3.exe'),
os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
):
if os.path.isfile(component) and _in_use(component):
return True
return False
def _check_call(argv, stdin_input=None, **kwargs):
"""Wrapper for subprocess.check_call that adds logging."""
logging.info('running %r', argv)
if stdin_input is not None:
kwargs['stdin'] = subprocess.PIPE
proc = subprocess.Popen(argv, **kwargs)
proc.communicate(input=stdin_input)
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, argv, None)
def _safe_rmtree(path):
if not os.path.exists(path):
return
def _make_writable_and_remove(path):
st = os.stat(path)
new_mode = st.st_mode | 0200
if st.st_mode == new_mode:
return False
try:
os.chmod(path, new_mode)
os.remove(path)
return True
except Exception:
return False
def _on_error(function, path, excinfo):
if not _make_writable_and_remove(path):
logging.warning('Failed to %s: %s (%s)', function, path, excinfo)
shutil.rmtree(path, onerror=_on_error)
def clean_up_old_installations(skip_dir):
"""Removes Python installations other than |skip_dir|.
This includes an "in-use" check against the "python.exe" in a given directory
  to avoid removing Python executables that are currently running. We need
this because our Python bootstrap may be run after (and by) other software
that is using the bootstrapped Python!
"""
root_contents = os.listdir(ROOT_DIR)
for f in ('win_tools-*_bin', 'python27*_bin', 'git-*_bin'):
for entry in fnmatch.filter(root_contents, f):
full_entry = os.path.join(ROOT_DIR, entry)
if full_entry == skip_dir or not os.path.isdir(full_entry):
continue
logging.info('Cleaning up old installation %r', entry)
if not _toolchain_in_use(full_entry):
_safe_rmtree(full_entry)
else:
logging.info('Toolchain at %r is in-use; skipping', full_entry)
# Version of "git_postprocess" system configuration (see |git_postprocess|).
GIT_POSTPROCESS_VERSION = '2'
def git_get_mingw_dir(git_directory):
"""Returns (str) The "mingw" directory in a Git installation, or None."""
for candidate in ('mingw64', 'mingw32'):
mingw_dir = os.path.join(git_directory, candidate)
if os.path.isdir(mingw_dir):
return mingw_dir
return None
def git_postprocess(template, git_directory):
# Update depot_tools files for "git help <command>"
mingw_dir = git_get_mingw_dir(git_directory)
if mingw_dir:
docsrc = os.path.join(ROOT_DIR, 'man', 'html')
git_docs_dir = os.path.join(mingw_dir, 'share', 'doc', 'git-doc')
for name in os.listdir(docsrc):
maybe_copy(
os.path.join(docsrc, name),
os.path.join(git_docs_dir, name))
else:
logging.info('Could not find mingw directory for %r.', git_directory)
# Create Git templates and configure its base layout.
for stub_name, relpath in STUBS.iteritems():
stub_template = template._replace(GIT_PROGRAM=relpath)
stub_template.maybe_install(
'git.template.bat',
os.path.join(ROOT_DIR, stub_name))
# Set-up our system configuration environment. The following set of
# parameters is versioned by "GIT_POSTPROCESS_VERSION". If they change,
# update "GIT_POSTPROCESS_VERSION" accordingly.
def configure_git_system():
git_bat_path = os.path.join(ROOT_DIR, 'git.bat')
_check_call([git_bat_path, 'config', '--system', 'core.autocrlf', 'false'])
_check_call([git_bat_path, 'config', '--system', 'core.filemode', 'false'])
_check_call([git_bat_path, 'config', '--system', 'core.preloadindex',
'true'])
_check_call([git_bat_path, 'config', '--system', 'core.fscache', 'true'])
_check_call([git_bat_path, 'config', '--system', 'protocol.version', '2'])
call_if_outdated(
os.path.join(git_directory, '.git_postprocess'),
GIT_POSTPROCESS_VERSION,
configure_git_system)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--win-tools-name', required=True,
help='The directory of the Python installation.')
parser.add_argument('--bleeding-edge', action='store_true',
help='Force bleeding edge Git.')
args = parser.parse_args(argv)
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
template = Template.empty()._replace(
PYTHON_RELDIR=os.path.join(args.win_tools_name, 'python'),
PYTHON_BIN_RELDIR=os.path.join(args.win_tools_name, 'python', 'bin'),
PYTHON_BIN_RELDIR_UNIX=posixpath.join(
args.win_tools_name, 'python', 'bin'),
PYTHON3_BIN_RELDIR=os.path.join(args.win_tools_name, 'python3', 'bin'),
PYTHON3_BIN_RELDIR_UNIX=posixpath.join(
args.win_tools_name, 'python3', 'bin'),
GIT_BIN_RELDIR=os.path.join(args.win_tools_name, 'git'),
GIT_BIN_RELDIR_UNIX=posixpath.join(args.win_tools_name, 'git'))
win_tools_dir = os.path.join(ROOT_DIR, args.win_tools_name)
git_postprocess(template, os.path.join(win_tools_dir, 'git'))
# Clean up any old Python and Git installations.
clean_up_old_installations(win_tools_dir)
  # Emit our Python bin depot-tools-relative directory. This is read by
# "python.bat" to identify the path of the current Python installation.
#
# We use this indirection so that upgrades can change this pointer to
# redirect "python.bat" to a new Python installation. We can't just update
# "python.bat" because batch file executions reload the batch file and seek
# to the previous cursor in between every command, so changing the batch
# file contents could invalidate any existing executions.
#
# The intention is that the batch file itself never needs to change when
# switching Python versions.
maybe_update(
template.PYTHON_BIN_RELDIR,
os.path.join(ROOT_DIR, 'python_bin_reldir.txt'))
maybe_update(
template.PYTHON3_BIN_RELDIR,
os.path.join(ROOT_DIR, 'python3_bin_reldir.txt'))
python_bat_template = ('python27.new.bat' if not args.bleeding_edge
else 'python27.bleeding_edge.bat')
python3_bat_template = ('python3.new.bat' if not args.bleeding_edge
else 'python3.bleeding_edge.bat')
# Re-evaluate and regenerate our root templated files.
for src_name, dst_name in (
('git-bash.template.sh', 'git-bash'),
(python_bat_template, 'python.bat'),
(python3_bat_template, 'python3.bat'),
):
template.maybe_install(src_name, os.path.join(ROOT_DIR, dst_name))
return 0
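# A minimal sketch (hypothetical helper, not used by this script) of how a
# wrapper such as "python.bat" could resolve the active interpreter through the
# "python_bin_reldir.txt" pointer file maintained above.
def _resolve_python_exe_sketch(root_dir):
  with open(os.path.join(root_dir, 'python_bin_reldir.txt')) as fd:
    bin_reldir = fd.read().strip()
  return os.path.join(root_dir, bin_reldir, 'python.exe')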
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 33.921569
| 80
| 0.700908
|
cd415f502777060e8ed92aa8593236280dc5b274
| 6,587
|
py
|
Python
|
docs/source/conf.py
|
Booteille/Dragonfire
|
17d67c89d46a0f29cee99239109fddfccc5e6ab3
|
[
"MIT"
] | 1
|
2019-04-15T14:04:03.000Z
|
2019-04-15T14:04:03.000Z
|
docs/source/conf.py
|
Booteille/Dragonfire
|
17d67c89d46a0f29cee99239109fddfccc5e6ab3
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
Booteille/Dragonfire
|
17d67c89d46a0f29cee99239109fddfccc5e6ab3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
from recommonmark.parser import CommonMarkParser
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'Dragonfire'
copyright = '2018, Mehmet Mert Yıldıran'
author = 'Mehmet Mert Yıldıran'
# The short X.Y version
version = '1.0.0'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'm2r',
]
# Napoleon settings
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
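#
# For example (hypothetical values, left disabled here), a global table of
# contents plus a search box on every page could be requested with:
# html_sidebars = {'**': ['globaltoc.html', 'searchbox.html']}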
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dragonfiredoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Dragonfire.tex', 'Dragonfire Documentation',
'Mehmet Mert Yıldıran', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dragonfire', 'Dragonfire Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Dragonfire', 'Dragonfire Documentation',
author, 'Dragonfire', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Show __init__(self) methods in the documentation
autoclass_content = 'both'
# Post-install script for sphinx-build
def setup(app):
import nltk
nltk.download('names')
nltk.download('brown')
nltk.download('wordnet')
nltk.download('punkt')
# Mock out modules
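# (These stand-ins let Sphinx autodoc import the dragonfire package on a docs
# builder where heavy runtime dependencies such as spacy are not installed;
# every attribute access on a mocked module simply returns another MagicMock.)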
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
'realhud',
'spacy',
'youtube_dl',
'dragonfire.deepconv',
'dragonfire.deepconv.model',
'dragonfire.deepconv.textdata',
'dragonfire.deepconv.corpus.cornelldata',
'dragonfire.deepconv.corpus.lightweightdata',
'dragonfire.deepconv.corpus.opensubsdata',
'dragonfire.deepconv.corpus.scotusdata',
'dragonfire.deepconv.corpus.ubuntudata',
'dragonfire.deepconv.corpus',
'dragonfire.sr.decoder_test',
'dragonfire.sr.experimental'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
| 29.40625
| 79
| 0.657659
|
56cd0a910c3865e4325e506bdc864d66485602ff
| 4,163
|
py
|
Python
|
venv/lib/python3.8/site-packages/ete3/test/test_treeview/item_faces.py
|
bjru/dendogram-traversal
|
410bea2dd852caef5fd9d4dde9306203a2d29220
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/ete3/test/test_treeview/item_faces.py
|
bjru/dendogram-traversal
|
410bea2dd852caef5fd9d4dde9306203a2d29220
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/ete3/test/test_treeview/item_faces.py
|
bjru/dendogram-traversal
|
410bea2dd852caef5fd9d4dde9306203a2d29220
|
[
"MIT"
] | null | null | null |
# To play with random colors
import colorsys
import random
from ... import Tree, faces, TreeStyle, NodeStyle, Face
# We will need to create Qt4 items
from ...treeview.qt import QtCore, Qt
from ...treeview.qt import QGraphicsRectItem, QGraphicsSimpleTextItem, \
QGraphicsEllipseItem, QColor, QPen, QBrush
class InteractiveItem(QGraphicsRectItem):
def __init__(self, *arg, **karg):
QGraphicsRectItem.__init__(self, *arg, **karg)
self.node = None
self.label = None
self.setCursor(QtCore.Qt.PointingHandCursor)
def hoverEnterEvent (self, e):
# There are many ways of adding interactive elements. With the
# following code, I show/hide a text item over my custom
# DynamicItemFace
if not self.label:
self.label = QGraphicsRectItem()
self.label.setParentItem(self)
# This is to ensure that the label is rendered over the
# rest of item children (default ZValue for items is 0)
self.label.setZValue(1)
self.label.setBrush(QBrush(QColor("white")))
self.label.text = QGraphicsSimpleTextItem()
self.label.text.setParentItem(self.label)
self.label.text.setText(self.node.name)
self.label.setRect(self.label.text.boundingRect())
self.label.setVisible(True)
def hoverLeaveEvent(self, e):
if self.label:
self.label.setVisible(False)
def random_color(h=None):
"""Generates a random color in RGB format."""
if not h:
h = random.random()
s = 0.5
l = 0.5
return _hls2hex(h, l, s)
def _hls2hex(h, l, s):
return '#%02x%02x%02x' %tuple(map(lambda x: int(x*255),
colorsys.hls_to_rgb(h, l, s)))
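# For illustration, _hls2hex(0.0, 0.5, 0.5) should yield '#bf3f3f' (a muted
# red): colorsys.hls_to_rgb maps the HLS triple to RGB floats, which are then
# scaled to 0-255 and hex-formatted.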
def ugly_name_face(node, *args, **kargs):
""" This is my item generator. It must receive a node object, and
returns a Qt4 graphics item that can be used as a node face.
"""
# receive an arbitrary number of arguments, in this case width and
# height of the faces
width = args[0]
height = args[1]
## Creates a main master Item that will contain all other elements
## Items can be standard QGraphicsItem
# masterItem = QGraphicsRectItem(0, 0, width, height)
# Or your custom Items, in which you can re-implement interactive
# functions, etc. Check QGraphicsItem doc for details.
masterItem = InteractiveItem(0, 0, width, height)
masterItem.setAcceptHoverEvents(True)
# Keep a link within the item to access node info
masterItem.node = node
    # I don't want a border around the masterItem
masterItem.setPen(QPen(QtCore.Qt.NoPen))
# Add ellipse around text
ellipse = QGraphicsEllipseItem(masterItem.rect())
ellipse.setParentItem(masterItem)
# Change ellipse color
ellipse.setBrush(QBrush(QColor( random_color())))
# Add node name within the ellipse
text = QGraphicsSimpleTextItem(node.name)
text.setParentItem(ellipse)
    text.setPen(QPen(QColor("white")))
# Center text according to masterItem size
tw = text.boundingRect().width()
th = text.boundingRect().height()
center = masterItem.boundingRect().center()
text.setPos(center.x()-tw/2, center.y()-th/2)
return masterItem
def master_ly(node):
if node.is_leaf():
        # Create an ItemFace. First argument must be the pointer to
# the constructor function that returns a QGraphicsItem. It
# will be used to draw the Face. Next arguments are arbitrary,
# and they will be forwarded to the constructor Face function.
F = faces.DynamicItemFace(ugly_name_face, 100, 50)
faces.add_face_to_node(F, node, 0, position="aligned")
def get_example_tree():
t = Tree()
t.populate(8, reuse_names=False)
ts = TreeStyle()
ts.layout_fn = master_ly
ts.title.add_face(faces.TextFace("Drawing your own Qt Faces", fsize=15), 0)
return t, ts
if __name__ == "__main__":
t, ts = get_example_tree()
#t.render("item_faces.png", h=400, tree_style=ts)
# The interactive features are only available using the GUI
t.show(tree_style=ts)
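    # (For non-interactive use, the commented t.render(...) call above writes a
    # static image instead of opening the GUI viewer.)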
| 33.572581
| 79
| 0.665386
|
0d246082da4518ccd067247af37aa779b4966425
| 706
|
py
|
Python
|
setup.py
|
parsel-l/mcavatar
|
795ef2a167e1e85ba5a45187c59eb59458e0c39a
|
[
"MIT"
] | null | null | null |
setup.py
|
parsel-l/mcavatar
|
795ef2a167e1e85ba5a45187c59eb59458e0c39a
|
[
"MIT"
] | null | null | null |
setup.py
|
parsel-l/mcavatar
|
795ef2a167e1e85ba5a45187c59eb59458e0c39a
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mcavatar", # Replace with your username
version="1.0.0",
author="parsel",
author_email="parsel.pip@gmail.com",
description="Lightweight & Blazing-Fast minecraft skin asset tool",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/parsel-l/mcavatar",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
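# With this configuration in place, a distribution can typically be built with
# the standard setuptools invocation (not specific to this project):
#   python setup.py sdist bdist_wheel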
| 17.65
| 71
| 0.652975
|
42c11dd40a12b9c8e914463001612502b999b908
| 4,207
|
py
|
Python
|
longclaw/orders/api.py
|
YokoTheSlayer/longclaw
|
83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611
|
[
"MIT"
] | null | null | null |
longclaw/orders/api.py
|
YokoTheSlayer/longclaw
|
83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611
|
[
"MIT"
] | null | null | null |
longclaw/orders/api.py
|
YokoTheSlayer/longclaw
|
83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611
|
[
"MIT"
] | null | null | null |
from rest_framework.decorators import detail_route
from rest_framework import permissions, status, viewsets
from rest_framework.response import Response
from longclaw.orders.models import Order, OrderItem
from longclaw.orders.serializers import OrderSerializer
from os import environ
import requests
import datetime
import hashlib
import smtplib
from email.mime.text import MIMEText
class OrderViewSet(viewsets.ModelViewSet):
serializer_class = OrderSerializer
permission_classes = [permissions.IsAdminUser]
queryset = Order.objects.all()
def send_mail_link(self, new_response, data):
        #new_response['PAY_LINK'] = 'HTTPS://TEST.RU'  # debug stub, remove in production
msg = MIMEText('Ссылка для оплаты вашего заказа: "{}"'.format(new_response['PAY_LINK']))
msg['Subject'] = ("Ссылка для оплаты заказа")
msg['From'] = environ['SMTP_HOST_LOGIN']
msg['To'] = data.email
s = smtplib.SMTP_SSL(environ['SMTP_HOST'], environ['SMTP_PORT'])
s.login(environ['SMTP_HOST_LOGIN'], environ['SMTP_HOST_PASSWORD'])
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.quit()
def analyze_response(self, data, order):
#data['RESULT'] = '0'
#data['PAY_ID'] = '1111'
if data['RESULT'] == '105':
            print('Partner is blocked from making payments')
return
elif data['RESULT'] == '0':
            print('Operation completed successfully')
order.transaction_id = data['PAY_ID']
order.save()
self.send_mail_link(data, order)
elif data['RESULT'] == '2':
print('Wrong parameter {}'.format(data['RESULT_DESC']))
return
elif data['RESULT'] == '3':
            print('Something is wrong with the system')
return
elif data['RESULT'] == '106':
print('Payment in progress or already done')
print(data['PAY_ID'], data['STATUS'], data['SDCODE'])
return
elif data['RESULT'] == '108':
print('Wrong operation')
return
else:
print('another error')
return
def create_params(self, data):
price = data.total*100
now = datetime.datetime.now()
hashed_data = hashlib.md5(
str.encode(
environ['ID_TERMINAL'] +
environ['LOGIN'] +
environ['PASSWORD']))
params = {
'OPERATION': 'CreatePayment',
'TERMINAL_ID': environ['ID_TERMINAL'],
'ARTICLE_ID': environ['ID_ARTICLE'],
'MPAY_ID': data.mpay_id,
'MDATETIME': now.isoformat(),
'AMOUNT': price,
'CURRENCY': 'RUR',
'RETURN_URL': environ['RETURN_URL'],
'FAIL_URL': environ['FAIL_URL'],
'IDENTITY': hashed_data.hexdigest()}
return params
def response_to_dict(self, data):
splitted_list = data.text.split('&')
new_splitted_list = []
for item in splitted_list:
new_splitted_list.append(item.split('='))
return dict(new_splitted_list)
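    # Illustration of the parsing above: a gateway reply whose body text is
    # "RESULT=0&PAY_ID=1111" comes back as {'RESULT': '0', 'PAY_ID': '1111'}.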
@detail_route(methods=['post'])
def shipped_order(self, request, pk):
"""Refund the order specified by the pk
"""
order = Order.objects.get(id=pk)
order.shipped()
return Response(status=status.HTTP_204_NO_CONTENT)
@detail_route(methods=['post'])
def fulfill_order(self, request, pk):
"""Mark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.mpay_id = order.id
response = requests.post(
'https://w.red-pay.ru/partner/3/acquiring',
data=self.create_params(order))
new_response = self.response_to_dict(response)
self.analyze_response(new_response, order)
order.fulfill()
return Response(status=status.HTTP_204_NO_CONTENT)
@detail_route(methods=['post'])
def in_process_order(self, request, pk):
"""Mark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.in_process()
return Response(status=status.HTTP_204_NO_CONTENT)
| 35.652542
| 96
| 0.601854
|
cb694f8a8ad9284a2fdd8bac48ede33cb9f59555
| 51
|
py
|
Python
|
macefash/settings.example.py
|
palcu/macefash
|
6c7b9ab87708546ed9391205a602beb48beb00d8
|
[
"MIT"
] | 1
|
2018-04-25T11:33:55.000Z
|
2018-04-25T11:33:55.000Z
|
macefash/settings.example.py
|
palcu/macefash
|
6c7b9ab87708546ed9391205a602beb48beb00d8
|
[
"MIT"
] | null | null | null |
macefash/settings.example.py
|
palcu/macefash
|
6c7b9ab87708546ed9391205a602beb48beb00d8
|
[
"MIT"
] | 1
|
2018-04-25T11:33:56.000Z
|
2018-04-25T11:33:56.000Z
|
SETTINGS = {
'auth': [('schioara', 'oarba')]
}
| 12.75
| 35
| 0.490196
|
c28d52f132cd3ac7301ce02a6a8c98e47c6e7d3e
| 242,280
|
py
|
Python
|
qbob/formatter/QSharpParser.py
|
quantum-experiments/qbob
|
6b33ea48a03e1e194dc87878b1d88395e560dff0
|
[
"MIT"
] | null | null | null |
qbob/formatter/QSharpParser.py
|
quantum-experiments/qbob
|
6b33ea48a03e1e194dc87878b1d88395e560dff0
|
[
"MIT"
] | null | null | null |
qbob/formatter/QSharpParser.py
|
quantum-experiments/qbob
|
6b33ea48a03e1e194dc87878b1d88395e560dff0
|
[
"MIT"
] | null | null | null |
# Generated from QSharpParser.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\u0088")
buf.write("\u02b9\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\3\2\7\2T\n\2\f\2\16\2W\13\2\3\2")
buf.write("\3\2\3\3\3\3\5\3]\n\3\3\4\3\4\3\4\3\4\7\4c\n\4\f\4\16")
buf.write("\4f\13\4\3\4\3\4\3\5\3\5\3\5\7\5m\n\5\f\5\16\5p\13\5\3")
buf.write("\6\3\6\3\6\5\6u\n\6\3\7\3\7\3\7\3\7\5\7{\n\7\3\7\3\7\3")
buf.write("\b\3\b\3\b\3\t\3\t\3\n\7\n\u0085\n\n\f\n\16\n\u0088\13")
buf.write("\n\3\n\5\n\u008b\n\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\f\3\f\5\f\u0096\n\f\3\r\3\r\3\r\3\r\7\r\u009c\n\r\f")
buf.write("\r\16\r\u009f\13\r\5\r\u00a1\n\r\3\r\3\r\3\16\3\16\5\16")
buf.write("\u00a7\n\16\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\5")
buf.write("\20\u00b1\n\20\3\20\3\20\3\20\3\20\5\20\u00b7\n\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\7\21\u00bf\n\21\f\21\16\21\u00c2")
buf.write("\13\21\5\21\u00c4\n\21\3\21\3\21\3\22\3\22\3\22\3\22\7")
buf.write("\22\u00cc\n\22\f\22\16\22\u00cf\13\22\5\22\u00d1\n\22")
buf.write("\3\22\3\22\3\23\3\23\5\23\u00d7\n\23\3\24\3\24\3\24\3")
buf.write("\25\3\25\3\25\3\25\3\25\3\25\3\25\5\25\u00e3\n\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\25\7\25\u00eb\n\25\f\25\16\25\u00ee")
buf.write("\13\25\3\26\3\26\7\26\u00f2\n\26\f\26\16\26\u00f5\13\26")
buf.write("\3\26\3\26\5\26\u00f9\n\26\3\27\6\27\u00fc\n\27\r\27\16")
buf.write("\27\u00fd\3\27\3\27\3\30\3\30\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\5\31\u010f\n\31\3\32\5")
buf.write("\32\u0112\n\32\3\32\3\32\3\33\3\33\3\33\3\33\7\33\u011a")
buf.write("\n\33\f\33\16\33\u011d\13\33\5\33\u011f\n\33\3\33\3\33")
buf.write("\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\7\35\u0137")
buf.write("\n\35\f\35\16\35\u013a\13\35\3\35\5\35\u013d\n\35\5\35")
buf.write("\u013f\n\35\3\35\3\35\3\35\3\35\5\35\u0145\n\35\3\35\3")
buf.write("\35\5\35\u0149\n\35\3\35\3\35\3\35\7\35\u014e\n\35\f\35")
buf.write("\16\35\u0151\13\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\36\5\36\u015d\n\36\3\37\3\37\3\37\3\37\3")
buf.write("\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37")
buf.write("\u01af\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3")
buf.write("\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\5\37\u01c5\n\37\3 \3 \7 \u01c9\n \f \16 \u01cc\13")
buf.write(" \3 \3 \3!\3!\3!\3!\3!\3!\7!\u01d6\n!\f!\16!\u01d9\13")
buf.write("!\3!\5!\u01dc\n!\5!\u01de\n!\3!\5!\u01e1\n!\3\"\3\"\3")
buf.write("#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\7#\u01f1\n#\f#\16#")
buf.write("\u01f4\13#\3#\5#\u01f7\n#\5#\u01f9\n#\3#\5#\u01fc\n#\3")
buf.write("$\3$\3$\3$\3$\3$\3$\7$\u0205\n$\f$\16$\u0208\13$\3$\5")
buf.write("$\u020b\n$\5$\u020d\n$\3$\5$\u0210\n$\3$\3$\3$\3$\3$\7")
buf.write("$\u0217\n$\f$\16$\u021a\13$\3$\3$\3$\7$\u021f\n$\f$\16")
buf.write("$\u0222\13$\3$\3$\3$\3$\3$\3$\3$\3$\7$\u022c\n$\f$\16")
buf.write("$\u022f\13$\3$\5$\u0232\n$\5$\u0234\n$\3$\3$\3$\3$\3$")
buf.write("\7$\u023b\n$\f$\16$\u023e\13$\3$\5$\u0241\n$\5$\u0243")
buf.write("\n$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\5")
buf.write("$\u0255\n$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3")
buf.write("$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3")
buf.write("$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3")
buf.write("$\3$\3$\3$\3$\5$\u028e\n$\3$\3$\3$\3$\3$\3$\3$\7$\u0297")
buf.write("\n$\f$\16$\u029a\13$\3$\5$\u029d\n$\5$\u029f\n$\3$\3$")
buf.write("\3$\7$\u02a4\n$\f$\16$\u02a7\13$\3%\3%\3&\3&\3\'\3\'\3")
buf.write("(\3(\3)\3)\3)\3)\3)\3)\5)\u02b7\n)\3)\2\5(8F*\2\4\6\b")
buf.write("\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668")
buf.write(":<>@BDFHJLNP\2\21\4\2\31\31))\5\2\5\5\13\13\17\17\4\2")
buf.write("TT{{\4\2AAVV\17\2??CCKK\\\\^^bbeeiikkmmooqqss\5\2&&[[")
buf.write("tt\5\2BBaahh\4\2[[dd\4\2nnpp\3\2WZ\4\2RR]]\4\2\26\268")
buf.write("8\4\2\'\'>>\3\2,/\3\2\u0082\u0083\2\u031a\2U\3\2\2\2\4")
buf.write("\\\3\2\2\2\6^\3\2\2\2\bi\3\2\2\2\nt\3\2\2\2\fv\3\2\2\2")
buf.write("\16~\3\2\2\2\20\u0081\3\2\2\2\22\u0086\3\2\2\2\24\u008c")
buf.write("\3\2\2\2\26\u0095\3\2\2\2\30\u0097\3\2\2\2\32\u00a6\3")
buf.write("\2\2\2\34\u00a8\3\2\2\2\36\u00ac\3\2\2\2 \u00ba\3\2\2")
buf.write("\2\"\u00c7\3\2\2\2$\u00d6\3\2\2\2&\u00d8\3\2\2\2(\u00e2")
buf.write("\3\2\2\2*\u00f8\3\2\2\2,\u00fb\3\2\2\2.\u0101\3\2\2\2")
buf.write("\60\u010e\3\2\2\2\62\u0111\3\2\2\2\64\u0115\3\2\2\2\66")
buf.write("\u0122\3\2\2\28\u0148\3\2\2\2:\u015c\3\2\2\2<\u01c4\3")
buf.write("\2\2\2>\u01c6\3\2\2\2@\u01e0\3\2\2\2B\u01e2\3\2\2\2D\u01fb")
buf.write("\3\2\2\2F\u0254\3\2\2\2H\u02a8\3\2\2\2J\u02aa\3\2\2\2")
buf.write("L\u02ac\3\2\2\2N\u02ae\3\2\2\2P\u02b6\3\2\2\2RT\5\4\3")
buf.write("\2SR\3\2\2\2TW\3\2\2\2US\3\2\2\2UV\3\2\2\2VX\3\2\2\2W")
buf.write("U\3\2\2\2XY\7\2\2\3Y\3\3\2\2\2Z]\5\6\4\2[]\5\n\6\2\\Z")
buf.write("\3\2\2\2\\[\3\2\2\2]\5\3\2\2\2^_\7#\2\2_`\5\b\5\2`d\7")
buf.write("F\2\2ac\5\n\6\2ba\3\2\2\2cf\3\2\2\2db\3\2\2\2de\3\2\2")
buf.write("\2eg\3\2\2\2fd\3\2\2\2gh\7G\2\2h\7\3\2\2\2in\7{\2\2jk")
buf.write("\7O\2\2km\7{\2\2lj\3\2\2\2mp\3\2\2\2nl\3\2\2\2no\3\2\2")
buf.write("\2o\t\3\2\2\2pn\3\2\2\2qu\5\f\7\2ru\5\24\13\2su\5\36\20")
buf.write("\2tq\3\2\2\2tr\3\2\2\2ts\3\2\2\2u\13\3\2\2\2vw\7(\2\2")
buf.write("wz\5\b\5\2xy\7\b\2\2y{\5\b\5\2zx\3\2\2\2z{\3\2\2\2{|\3")
buf.write("\2\2\2|}\7g\2\2}\r\3\2\2\2~\177\7D\2\2\177\u0080\5F$\2")
buf.write("\u0080\17\3\2\2\2\u0081\u0082\7\35\2\2\u0082\21\3\2\2")
buf.write("\2\u0083\u0085\5\16\b\2\u0084\u0083\3\2\2\2\u0085\u0088")
buf.write("\3\2\2\2\u0086\u0084\3\2\2\2\u0086\u0087\3\2\2\2\u0087")
buf.write("\u008a\3\2\2\2\u0088\u0086\3\2\2\2\u0089\u008b\5\20\t")
buf.write("\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b\23\3")
buf.write("\2\2\2\u008c\u008d\5\22\n\2\u008d\u008e\7%\2\2\u008e\u008f")
buf.write("\7{\2\2\u008f\u0090\7U\2\2\u0090\u0091\5\26\f\2\u0091")
buf.write("\u0092\7g\2\2\u0092\25\3\2\2\2\u0093\u0096\5\30\r\2\u0094")
buf.write("\u0096\58\35\2\u0095\u0093\3\2\2\2\u0095\u0094\3\2\2\2")
buf.write("\u0096\27\3\2\2\2\u0097\u00a0\7_\2\2\u0098\u009d\5\32")
buf.write("\16\2\u0099\u009a\7M\2\2\u009a\u009c\5\32\16\2\u009b\u0099")
buf.write("\3\2\2\2\u009c\u009f\3\2\2\2\u009d\u009b\3\2\2\2\u009d")
buf.write("\u009e\3\2\2\2\u009e\u00a1\3\2\2\2\u009f\u009d\3\2\2\2")
buf.write("\u00a0\u0098\3\2\2\2\u00a0\u00a1\3\2\2\2\u00a1\u00a2\3")
buf.write("\2\2\2\u00a2\u00a3\7`\2\2\u00a3\31\3\2\2\2\u00a4\u00a7")
buf.write("\5\34\17\2\u00a5\u00a7\5\26\f\2\u00a6\u00a4\3\2\2\2\u00a6")
buf.write("\u00a5\3\2\2\2\u00a7\33\3\2\2\2\u00a8\u00a9\7{\2\2\u00a9")
buf.write("\u00aa\7L\2\2\u00aa\u00ab\58\35\2\u00ab\35\3\2\2\2\u00ac")
buf.write("\u00ad\5\22\n\2\u00ad\u00ae\t\2\2\2\u00ae\u00b0\7{\2\2")
buf.write("\u00af\u00b1\5 \21\2\u00b0\u00af\3\2\2\2\u00b0\u00b1\3")
buf.write("\2\2\2\u00b1\u00b2\3\2\2\2\u00b2\u00b3\5\"\22\2\u00b3")
buf.write("\u00b4\7L\2\2\u00b4\u00b6\58\35\2\u00b5\u00b7\5&\24\2")
buf.write("\u00b6\u00b5\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00b8\3")
buf.write("\2\2\2\u00b8\u00b9\5*\26\2\u00b9\37\3\2\2\2\u00ba\u00c3")
buf.write("\7Y\2\2\u00bb\u00c0\7~\2\2\u00bc\u00bd\7M\2\2\u00bd\u00bf")
buf.write("\7~\2\2\u00be\u00bc\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0")
buf.write("\u00be\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c4\3\2\2\2")
buf.write("\u00c2\u00c0\3\2\2\2\u00c3\u00bb\3\2\2\2\u00c3\u00c4\3")
buf.write("\2\2\2\u00c4\u00c5\3\2\2\2\u00c5\u00c6\7W\2\2\u00c6!\3")
buf.write("\2\2\2\u00c7\u00d0\7_\2\2\u00c8\u00cd\5$\23\2\u00c9\u00ca")
buf.write("\7M\2\2\u00ca\u00cc\5$\23\2\u00cb\u00c9\3\2\2\2\u00cc")
buf.write("\u00cf\3\2\2\2\u00cd\u00cb\3\2\2\2\u00cd\u00ce\3\2\2\2")
buf.write("\u00ce\u00d1\3\2\2\2\u00cf\u00cd\3\2\2\2\u00d0\u00c8\3")
buf.write("\2\2\2\u00d0\u00d1\3\2\2\2\u00d1\u00d2\3\2\2\2\u00d2\u00d3")
buf.write("\7`\2\2\u00d3#\3\2\2\2\u00d4\u00d7\5\34\17\2\u00d5\u00d7")
buf.write("\5\"\22\2\u00d6\u00d4\3\2\2\2\u00d6\u00d5\3\2\2\2\u00d7")
buf.write("%\3\2\2\2\u00d8\u00d9\7 \2\2\u00d9\u00da\5(\25\2\u00da")
buf.write("\'\3\2\2\2\u00db\u00dc\b\25\1\2\u00dc\u00e3\7\3\2\2\u00dd")
buf.write("\u00e3\7\20\2\2\u00de\u00df\7_\2\2\u00df\u00e0\5(\25\2")
buf.write("\u00e0\u00e1\7`\2\2\u00e1\u00e3\3\2\2\2\u00e2\u00db\3")
buf.write("\2\2\2\u00e2\u00dd\3\2\2\2\u00e2\u00de\3\2\2\2\u00e3\u00ec")
buf.write("\3\2\2\2\u00e4\u00e5\f\4\2\2\u00e5\u00e6\7B\2\2\u00e6")
buf.write("\u00eb\5(\25\5\u00e7\u00e8\f\3\2\2\u00e8\u00e9\7d\2\2")
buf.write("\u00e9\u00eb\5(\25\4\u00ea\u00e4\3\2\2\2\u00ea\u00e7\3")
buf.write("\2\2\2\u00eb\u00ee\3\2\2\2\u00ec\u00ea\3\2\2\2\u00ec\u00ed")
buf.write("\3\2\2\2\u00ed)\3\2\2\2\u00ee\u00ec\3\2\2\2\u00ef\u00f3")
buf.write("\7F\2\2\u00f0\u00f2\5,\27\2\u00f1\u00f0\3\2\2\2\u00f2")
buf.write("\u00f5\3\2\2\2\u00f3\u00f1\3\2\2\2\u00f3\u00f4\3\2\2\2")
buf.write("\u00f4\u00f6\3\2\2\2\u00f5\u00f3\3\2\2\2\u00f6\u00f9\7")
buf.write("G\2\2\u00f7\u00f9\5> \2\u00f8\u00ef\3\2\2\2\u00f8\u00f7")
buf.write("\3\2\2\2\u00f9+\3\2\2\2\u00fa\u00fc\5.\30\2\u00fb\u00fa")
buf.write("\3\2\2\2\u00fc\u00fd\3\2\2\2\u00fd\u00fb\3\2\2\2\u00fd")
buf.write("\u00fe\3\2\2\2\u00fe\u00ff\3\2\2\2\u00ff\u0100\5\60\31")
buf.write("\2\u0100-\3\2\2\2\u0101\u0102\t\3\2\2\u0102/\3\2\2\2\u0103")
buf.write("\u0104\7\t\2\2\u0104\u010f\7g\2\2\u0105\u0106\7\65\2\2")
buf.write("\u0106\u010f\7g\2\2\u0107\u0108\7\37\2\2\u0108\u010f\7")
buf.write("g\2\2\u0109\u010a\7\21\2\2\u010a\u010f\7g\2\2\u010b\u010c")
buf.write("\7\36\2\2\u010c\u010f\7g\2\2\u010d\u010f\5\62\32\2\u010e")
buf.write("\u0103\3\2\2\2\u010e\u0105\3\2\2\2\u010e\u0107\3\2\2\2")
buf.write("\u010e\u0109\3\2\2\2\u010e\u010b\3\2\2\2\u010e\u010d\3")
buf.write("\2\2\2\u010f\61\3\2\2\2\u0110\u0112\5\64\33\2\u0111\u0110")
buf.write("\3\2\2\2\u0111\u0112\3\2\2\2\u0112\u0113\3\2\2\2\u0113")
buf.write("\u0114\5> \2\u0114\63\3\2\2\2\u0115\u011e\7_\2\2\u0116")
buf.write("\u011b\5\66\34\2\u0117\u0118\7M\2\2\u0118\u011a\5\66\34")
buf.write("\2\u0119\u0117\3\2\2\2\u011a\u011d\3\2\2\2\u011b\u0119")
buf.write("\3\2\2\2\u011b\u011c\3\2\2\2\u011c\u011f\3\2\2\2\u011d")
buf.write("\u011b\3\2\2\2\u011e\u0116\3\2\2\2\u011e\u011f\3\2\2\2")
buf.write("\u011f\u0120\3\2\2\2\u0120\u0121\7`\2\2\u0121\65\3\2\2")
buf.write("\2\u0122\u0123\t\4\2\2\u0123\67\3\2\2\2\u0124\u0125\b")
buf.write("\35\1\2\u0125\u0149\7u\2\2\u0126\u0149\7~\2\2\u0127\u0149")
buf.write("\7\n\2\2\u0128\u0149\7\f\2\2\u0129\u0149\7\22\2\2\u012a")
buf.write("\u0149\7\34\2\2\u012b\u0149\7+\2\2\u012c\u0149\7\60\2")
buf.write("\2\u012d\u0149\7\61\2\2\u012e\u0149\7\63\2\2\u012f\u0149")
buf.write("\7\67\2\2\u0130\u0149\79\2\2\u0131\u0149\5\b\5\2\u0132")
buf.write("\u013e\7_\2\2\u0133\u0138\58\35\2\u0134\u0135\7M\2\2\u0135")
buf.write("\u0137\58\35\2\u0136\u0134\3\2\2\2\u0137\u013a\3\2\2\2")
buf.write("\u0138\u0136\3\2\2\2\u0138\u0139\3\2\2\2\u0139\u013c\3")
buf.write("\2\2\2\u013a\u0138\3\2\2\2\u013b\u013d\7M\2\2\u013c\u013b")
buf.write("\3\2\2\2\u013c\u013d\3\2\2\2\u013d\u013f\3\2\2\2\u013e")
buf.write("\u0133\3\2\2\2\u013e\u013f\3\2\2\2\u013f\u0140\3\2\2\2")
buf.write("\u0140\u0149\7`\2\2\u0141\u0142\7_\2\2\u0142\u0144\5:")
buf.write("\36\2\u0143\u0145\5&\24\2\u0144\u0143\3\2\2\2\u0144\u0145")
buf.write("\3\2\2\2\u0145\u0146\3\2\2\2\u0146\u0147\7`\2\2\u0147")
buf.write("\u0149\3\2\2\2\u0148\u0124\3\2\2\2\u0148\u0126\3\2\2\2")
buf.write("\u0148\u0127\3\2\2\2\u0148\u0128\3\2\2\2\u0148\u0129\3")
buf.write("\2\2\2\u0148\u012a\3\2\2\2\u0148\u012b\3\2\2\2\u0148\u012c")
buf.write("\3\2\2\2\u0148\u012d\3\2\2\2\u0148\u012e\3\2\2\2\u0148")
buf.write("\u012f\3\2\2\2\u0148\u0130\3\2\2\2\u0148\u0131\3\2\2\2")
buf.write("\u0148\u0132\3\2\2\2\u0148\u0141\3\2\2\2\u0149\u014f\3")
buf.write("\2\2\2\u014a\u014b\f\3\2\2\u014b\u014c\7H\2\2\u014c\u014e")
buf.write("\7I\2\2\u014d\u014a\3\2\2\2\u014e\u0151\3\2\2\2\u014f")
buf.write("\u014d\3\2\2\2\u014f\u0150\3\2\2\2\u01509\3\2\2\2\u0151")
buf.write("\u014f\3\2\2\2\u0152\u0153\7_\2\2\u0153\u0154\58\35\2")
buf.write("\u0154\u0155\t\5\2\2\u0155\u0156\58\35\2\u0156\u0157\7")
buf.write("`\2\2\u0157\u015d\3\2\2\2\u0158\u0159\58\35\2\u0159\u015a")
buf.write("\t\5\2\2\u015a\u015b\58\35\2\u015b\u015d\3\2\2\2\u015c")
buf.write("\u0152\3\2\2\2\u015c\u0158\3\2\2\2\u015d;\3\2\2\2\u015e")
buf.write("\u015f\5F$\2\u015f\u0160\7g\2\2\u0160\u01c5\3\2\2\2\u0161")
buf.write("\u0162\7\64\2\2\u0162\u0163\5F$\2\u0163\u0164\7g\2\2\u0164")
buf.write("\u01c5\3\2\2\2\u0165\u0166\7\25\2\2\u0166\u0167\5F$\2")
buf.write("\u0167\u0168\7g\2\2\u0168\u01c5\3\2\2\2\u0169\u016a\7")
buf.write("!\2\2\u016a\u016b\5@!\2\u016b\u016c\7U\2\2\u016c\u016d")
buf.write("\5F$\2\u016d\u016e\7g\2\2\u016e\u01c5\3\2\2\2\u016f\u0170")
buf.write("\7\"\2\2\u0170\u0171\5@!\2\u0171\u0172\7U\2\2\u0172\u0173")
buf.write("\5F$\2\u0173\u0174\7g\2\2\u0174\u01c5\3\2\2\2\u0175\u0176")
buf.write("\7\66\2\2\u0176\u0177\5@!\2\u0177\u0178\7U\2\2\u0178\u0179")
buf.write("\5F$\2\u0179\u017a\7g\2\2\u017a\u01c5\3\2\2\2\u017b\u017c")
buf.write("\7\66\2\2\u017c\u017d\7{\2\2\u017d\u017e\5B\"\2\u017e")
buf.write("\u017f\5F$\2\u017f\u0180\7g\2\2\u0180\u01c5\3\2\2\2\u0181")
buf.write("\u0182\7\66\2\2\u0182\u0183\7{\2\2\u0183\u0184\7w\2\2")
buf.write("\u0184\u0185\5F$\2\u0185\u0186\7@\2\2\u0186\u0187\5F$")
buf.write("\2\u0187\u0188\7g\2\2\u0188\u01c5\3\2\2\2\u0189\u018a")
buf.write("\7\32\2\2\u018a\u018b\7_\2\2\u018b\u018c\5F$\2\u018c\u018d")
buf.write("\7`\2\2\u018d\u018e\5> \2\u018e\u01c5\3\2\2\2\u018f\u0190")
buf.write("\7\23\2\2\u0190\u0191\7_\2\2\u0191\u0192\5F$\2\u0192\u0193")
buf.write("\7`\2\2\u0193\u0194\5> \2\u0194\u01c5\3\2\2\2\u0195\u0196")
buf.write("\7\24\2\2\u0196\u01c5\5> \2\u0197\u0198\7\30\2\2\u0198")
buf.write("\u0199\7_\2\2\u0199\u019a\5@!\2\u019a\u019b\7\33\2\2\u019b")
buf.write("\u019c\5F$\2\u019c\u019d\7`\2\2\u019d\u019e\5> \2\u019e")
buf.write("\u01c5\3\2\2\2\u019f\u01a0\7<\2\2\u01a0\u01a1\7_\2\2\u01a1")
buf.write("\u01a2\5F$\2\u01a2\u01a3\7`\2\2\u01a3\u01a4\5> \2\u01a4")
buf.write("\u01c5\3\2\2\2\u01a5\u01a6\7\62\2\2\u01a6\u01c5\5> \2")
buf.write("\u01a7\u01a8\7:\2\2\u01a8\u01a9\7_\2\2\u01a9\u01aa\5F")
buf.write("$\2\u01aa\u01ae\7`\2\2\u01ab\u01af\7g\2\2\u01ac\u01ad")
buf.write("\7\27\2\2\u01ad\u01af\5> \2\u01ae\u01ab\3\2\2\2\u01ae")
buf.write("\u01ac\3\2\2\2\u01af\u01c5\3\2\2\2\u01b0\u01b1\7=\2\2")
buf.write("\u01b1\u01c5\5> \2\u01b2\u01b3\7\7\2\2\u01b3\u01c5\5>")
buf.write(" \2\u01b4\u01b5\7;\2\2\u01b5\u01b6\7_\2\2\u01b6\u01b7")
buf.write("\5@!\2\u01b7\u01b8\7U\2\2\u01b8\u01b9\5D#\2\u01b9\u01ba")
buf.write("\7`\2\2\u01ba\u01bb\5> \2\u01bb\u01c5\3\2\2\2\u01bc\u01bd")
buf.write("\7\r\2\2\u01bd\u01be\7_\2\2\u01be\u01bf\5@!\2\u01bf\u01c0")
buf.write("\7U\2\2\u01c0\u01c1\5D#\2\u01c1\u01c2\7`\2\2\u01c2\u01c3")
buf.write("\5> \2\u01c3\u01c5\3\2\2\2\u01c4\u015e\3\2\2\2\u01c4\u0161")
buf.write("\3\2\2\2\u01c4\u0165\3\2\2\2\u01c4\u0169\3\2\2\2\u01c4")
buf.write("\u016f\3\2\2\2\u01c4\u0175\3\2\2\2\u01c4\u017b\3\2\2\2")
buf.write("\u01c4\u0181\3\2\2\2\u01c4\u0189\3\2\2\2\u01c4\u018f\3")
buf.write("\2\2\2\u01c4\u0195\3\2\2\2\u01c4\u0197\3\2\2\2\u01c4\u019f")
buf.write("\3\2\2\2\u01c4\u01a5\3\2\2\2\u01c4\u01a7\3\2\2\2\u01c4")
buf.write("\u01b0\3\2\2\2\u01c4\u01b2\3\2\2\2\u01c4\u01b4\3\2\2\2")
buf.write("\u01c4\u01bc\3\2\2\2\u01c5=\3\2\2\2\u01c6\u01ca\7F\2\2")
buf.write("\u01c7\u01c9\5<\37\2\u01c8\u01c7\3\2\2\2\u01c9\u01cc\3")
buf.write("\2\2\2\u01ca\u01c8\3\2\2\2\u01ca\u01cb\3\2\2\2\u01cb\u01cd")
buf.write("\3\2\2\2\u01cc\u01ca\3\2\2\2\u01cd\u01ce\7G\2\2\u01ce")
buf.write("?\3\2\2\2\u01cf\u01e1\7u\2\2\u01d0\u01e1\7{\2\2\u01d1")
buf.write("\u01dd\7_\2\2\u01d2\u01d7\5@!\2\u01d3\u01d4\7M\2\2\u01d4")
buf.write("\u01d6\5@!\2\u01d5\u01d3\3\2\2\2\u01d6\u01d9\3\2\2\2\u01d7")
buf.write("\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8\u01db\3\2\2\2")
buf.write("\u01d9\u01d7\3\2\2\2\u01da\u01dc\7M\2\2\u01db\u01da\3")
buf.write("\2\2\2\u01db\u01dc\3\2\2\2\u01dc\u01de\3\2\2\2\u01dd\u01d2")
buf.write("\3\2\2\2\u01dd\u01de\3\2\2\2\u01de\u01df\3\2\2\2\u01df")
buf.write("\u01e1\7`\2\2\u01e0\u01cf\3\2\2\2\u01e0\u01d0\3\2\2\2")
buf.write("\u01e0\u01d1\3\2\2\2\u01e1A\3\2\2\2\u01e2\u01e3\t\6\2")
buf.write("\2\u01e3C\3\2\2\2\u01e4\u01e5\7\60\2\2\u01e5\u01e6\7_")
buf.write("\2\2\u01e6\u01fc\7`\2\2\u01e7\u01e8\7\60\2\2\u01e8\u01e9")
buf.write("\7H\2\2\u01e9\u01ea\5F$\2\u01ea\u01eb\7I\2\2\u01eb\u01fc")
buf.write("\3\2\2\2\u01ec\u01f8\7_\2\2\u01ed\u01f2\5D#\2\u01ee\u01ef")
buf.write("\7M\2\2\u01ef\u01f1\5D#\2\u01f0\u01ee\3\2\2\2\u01f1\u01f4")
buf.write("\3\2\2\2\u01f2\u01f0\3\2\2\2\u01f2\u01f3\3\2\2\2\u01f3")
buf.write("\u01f6\3\2\2\2\u01f4\u01f2\3\2\2\2\u01f5\u01f7\7M\2\2")
buf.write("\u01f6\u01f5\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f7\u01f9\3")
buf.write("\2\2\2\u01f8\u01ed\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9\u01fa")
buf.write("\3\2\2\2\u01fa\u01fc\7`\2\2\u01fb\u01e4\3\2\2\2\u01fb")
buf.write("\u01e7\3\2\2\2\u01fb\u01ec\3\2\2\2\u01fcE\3\2\2\2\u01fd")
buf.write("\u01fe\b$\1\2\u01fe\u0255\7u\2\2\u01ff\u020f\5\b\5\2\u0200")
buf.write("\u020c\7Y\2\2\u0201\u0206\58\35\2\u0202\u0203\7M\2\2\u0203")
buf.write("\u0205\58\35\2\u0204\u0202\3\2\2\2\u0205\u0208\3\2\2\2")
buf.write("\u0206\u0204\3\2\2\2\u0206\u0207\3\2\2\2\u0207\u020a\3")
buf.write("\2\2\2\u0208\u0206\3\2\2\2\u0209\u020b\7M\2\2\u020a\u0209")
buf.write("\3\2\2\2\u020a\u020b\3\2\2\2\u020b\u020d\3\2\2\2\u020c")
buf.write("\u0201\3\2\2\2\u020c\u020d\3\2\2\2\u020d\u020e\3\2\2\2")
buf.write("\u020e\u0210\7W\2\2\u020f\u0200\3\2\2\2\u020f\u0210\3")
buf.write("\2\2\2\u0210\u0255\3\2\2\2\u0211\u0255\7x\2\2\u0212\u0255")
buf.write("\7y\2\2\u0213\u0255\7z\2\2\u0214\u0218\7S\2\2\u0215\u0217")
buf.write("\5N(\2\u0216\u0215\3\2\2\2\u0217\u021a\3\2\2\2\u0218\u0216")
buf.write("\3\2\2\2\u0218\u0219\3\2\2\2\u0219\u021b\3\2\2\2\u021a")
buf.write("\u0218\3\2\2\2\u021b\u0255\7\u0084\2\2\u021c\u0220\7N")
buf.write("\2\2\u021d\u021f\5P)\2\u021e\u021d\3\2\2\2\u021f\u0222")
buf.write("\3\2\2\2\u0220\u021e\3\2\2\2\u0220\u0221\3\2\2\2\u0221")
buf.write("\u0223\3\2\2\2\u0222\u0220\3\2\2\2\u0223\u0255\7\u0088")
buf.write("\2\2\u0224\u0255\5H%\2\u0225\u0255\5J&\2\u0226\u0255\5")
buf.write("L\'\2\u0227\u0233\7_\2\2\u0228\u022d\5F$\2\u0229\u022a")
buf.write("\7M\2\2\u022a\u022c\5F$\2\u022b\u0229\3\2\2\2\u022c\u022f")
buf.write("\3\2\2\2\u022d\u022b\3\2\2\2\u022d\u022e\3\2\2\2\u022e")
buf.write("\u0231\3\2\2\2\u022f\u022d\3\2\2\2\u0230\u0232\7M\2\2")
buf.write("\u0231\u0230\3\2\2\2\u0231\u0232\3\2\2\2\u0232\u0234\3")
buf.write("\2\2\2\u0233\u0228\3\2\2\2\u0233\u0234\3\2\2\2\u0234\u0235")
buf.write("\3\2\2\2\u0235\u0255\7`\2\2\u0236\u0242\7H\2\2\u0237\u023c")
buf.write("\5F$\2\u0238\u0239\7M\2\2\u0239\u023b\5F$\2\u023a\u0238")
buf.write("\3\2\2\2\u023b\u023e\3\2\2\2\u023c\u023a\3\2\2\2\u023c")
buf.write("\u023d\3\2\2\2\u023d\u0240\3\2\2\2\u023e\u023c\3\2\2\2")
buf.write("\u023f\u0241\7M\2\2\u0240\u023f\3\2\2\2\u0240\u0241\3")
buf.write("\2\2\2\u0241\u0243\3\2\2\2\u0242\u0237\3\2\2\2\u0242\u0243")
buf.write("\3\2\2\2\u0243\u0244\3\2\2\2\u0244\u0255\7I\2\2\u0245")
buf.write("\u0246\7$\2\2\u0246\u0247\58\35\2\u0247\u0248\7H\2\2\u0248")
buf.write("\u0249\5F$\2\u0249\u024a\7I\2\2\u024a\u0255\3\2\2\2\u024b")
buf.write("\u024c\7\16\2\2\u024c\u0255\5F$\27\u024d\u024e\7\4\2\2")
buf.write("\u024e\u0255\5F$\26\u024f\u0250\t\7\2\2\u0250\u0255\5")
buf.write("F$\24\u0251\u0252\7T\2\2\u0252\u0255\5F$\5\u0253\u0255")
buf.write("\7T\2\2\u0254\u01fd\3\2\2\2\u0254\u01ff\3\2\2\2\u0254")
buf.write("\u0211\3\2\2\2\u0254\u0212\3\2\2\2\u0254\u0213\3\2\2\2")
buf.write("\u0254\u0214\3\2\2\2\u0254\u021c\3\2\2\2\u0254\u0224\3")
buf.write("\2\2\2\u0254\u0225\3\2\2\2\u0254\u0226\3\2\2\2\u0254\u0227")
buf.write("\3\2\2\2\u0254\u0236\3\2\2\2\u0254\u0245\3\2\2\2\u0254")
buf.write("\u024b\3\2\2\2\u0254\u024d\3\2\2\2\u0254\u024f\3\2\2\2")
buf.write("\u0254\u0251\3\2\2\2\u0254\u0253\3\2\2\2\u0255\u02a5\3")
buf.write("\2\2\2\u0256\u0257\f\23\2\2\u0257\u0258\7J\2\2\u0258\u02a4")
buf.write("\5F$\23\u0259\u025a\f\22\2\2\u025a\u025b\t\b\2\2\u025b")
buf.write("\u02a4\5F$\23\u025c\u025d\f\21\2\2\u025d\u025e\t\t\2\2")
buf.write("\u025e\u02a4\5F$\22\u025f\u0260\f\20\2\2\u0260\u0261\t")
buf.write("\n\2\2\u0261\u02a4\5F$\21\u0262\u0263\f\17\2\2\u0263\u0264")
buf.write("\t\13\2\2\u0264\u02a4\5F$\20\u0265\u0266\f\16\2\2\u0266")
buf.write("\u0267\t\f\2\2\u0267\u02a4\5F$\17\u0268\u0269\f\r\2\2")
buf.write("\u0269\u026a\7j\2\2\u026a\u02a4\5F$\16\u026b\u026c\f\f")
buf.write("\2\2\u026c\u026d\7l\2\2\u026d\u02a4\5F$\r\u026e\u026f")
buf.write("\f\13\2\2\u026f\u0270\7r\2\2\u0270\u02a4\5F$\f\u0271\u0272")
buf.write("\f\n\2\2\u0272\u0273\7\6\2\2\u0273\u02a4\5F$\13\u0274")
buf.write("\u0275\f\t\2\2\u0275\u0276\7*\2\2\u0276\u02a4\5F$\n\u0277")
buf.write("\u0278\f\b\2\2\u0278\u0279\7f\2\2\u0279\u027a\5F$\2\u027a")
buf.write("\u027b\7c\2\2\u027b\u027c\5F$\b\u027c\u02a4\3\2\2\2\u027d")
buf.write("\u027e\f\7\2\2\u027e\u027f\7Q\2\2\u027f\u02a4\5F$\b\u0280")
buf.write("\u0281\f\3\2\2\u0281\u0282\7v\2\2\u0282\u0283\5F$\2\u0283")
buf.write("\u0284\7@\2\2\u0284\u0285\5F$\4\u0285\u02a4\3\2\2\2\u0286")
buf.write("\u028d\f\31\2\2\u0287\u0288\7P\2\2\u0288\u028e\7{\2\2")
buf.write("\u0289\u028a\7H\2\2\u028a\u028b\5F$\2\u028b\u028c\7I\2")
buf.write("\2\u028c\u028e\3\2\2\2\u028d\u0287\3\2\2\2\u028d\u0289")
buf.write("\3\2\2\2\u028e\u02a4\3\2\2\2\u028f\u0290\f\30\2\2\u0290")
buf.write("\u02a4\7E\2\2\u0291\u0292\f\25\2\2\u0292\u029e\7_\2\2")
buf.write("\u0293\u0298\5F$\2\u0294\u0295\7M\2\2\u0295\u0297\5F$")
buf.write("\2\u0296\u0294\3\2\2\2\u0297\u029a\3\2\2\2\u0298\u0296")
buf.write("\3\2\2\2\u0298\u0299\3\2\2\2\u0299\u029c\3\2\2\2\u029a")
buf.write("\u0298\3\2\2\2\u029b\u029d\7M\2\2\u029c\u029b\3\2\2\2")
buf.write("\u029c\u029d\3\2\2\2\u029d\u029f\3\2\2\2\u029e\u0293\3")
buf.write("\2\2\2\u029e\u029f\3\2\2\2\u029f\u02a0\3\2\2\2\u02a0\u02a4")
buf.write("\7`\2\2\u02a1\u02a2\f\6\2\2\u02a2\u02a4\7T\2\2\u02a3\u0256")
buf.write("\3\2\2\2\u02a3\u0259\3\2\2\2\u02a3\u025c\3\2\2\2\u02a3")
buf.write("\u025f\3\2\2\2\u02a3\u0262\3\2\2\2\u02a3\u0265\3\2\2\2")
buf.write("\u02a3\u0268\3\2\2\2\u02a3\u026b\3\2\2\2\u02a3\u026e\3")
buf.write("\2\2\2\u02a3\u0271\3\2\2\2\u02a3\u0274\3\2\2\2\u02a3\u0277")
buf.write("\3\2\2\2\u02a3\u027d\3\2\2\2\u02a3\u0280\3\2\2\2\u02a3")
buf.write("\u0286\3\2\2\2\u02a3\u028f\3\2\2\2\u02a3\u0291\3\2\2\2")
buf.write("\u02a3\u02a1\3\2\2\2\u02a4\u02a7\3\2\2\2\u02a5\u02a3\3")
buf.write("\2\2\2\u02a5\u02a6\3\2\2\2\u02a6G\3\2\2\2\u02a7\u02a5")
buf.write("\3\2\2\2\u02a8\u02a9\t\r\2\2\u02a9I\3\2\2\2\u02aa\u02ab")
buf.write("\t\16\2\2\u02abK\3\2\2\2\u02ac\u02ad\t\17\2\2\u02adM\3")
buf.write("\2\2\2\u02ae\u02af\t\20\2\2\u02afO\3\2\2\2\u02b0\u02b7")
buf.write("\7\u0085\2\2\u02b1\u02b2\7\u0086\2\2\u02b2\u02b3\5F$\2")
buf.write("\u02b3\u02b4\7G\2\2\u02b4\u02b7\3\2\2\2\u02b5\u02b7\7")
buf.write("\u0087\2\2\u02b6\u02b0\3\2\2\2\u02b6\u02b1\3\2\2\2\u02b6")
buf.write("\u02b5\3\2\2\2\u02b7Q\3\2\2\2EU\\dntz\u0086\u008a\u0095")
buf.write("\u009d\u00a0\u00a6\u00b0\u00b6\u00c0\u00c3\u00cd\u00d0")
buf.write("\u00d6\u00e2\u00ea\u00ec\u00f3\u00f8\u00fd\u010e\u0111")
buf.write("\u011b\u011e\u0138\u013c\u013e\u0144\u0148\u014f\u015c")
buf.write("\u01ae\u01c4\u01ca\u01d7\u01db\u01dd\u01e0\u01f2\u01f6")
buf.write("\u01f8\u01fb\u0206\u020a\u020c\u020f\u0218\u0220\u022d")
buf.write("\u0231\u0233\u023c\u0240\u0242\u0254\u028d\u0298\u029c")
buf.write("\u029e\u02a3\u02a5\u02b6")
return buf.getvalue()
class QSharpParser ( Parser ):
grammarFileName = "QSharpParser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'Adj'", "'Adjoint'", "'adjoint'", "'and'",
"'apply'", "'as'", "'auto'", "'BigInt'", "'body'",
"'Bool'", "'borrowing'", "'Controlled'", "'controlled'",
"'Ctl'", "'distribute'", "'Double'", "'elif'", "'else'",
"'fail'", "'false'", "'fixup'", "'for'", "'function'",
"'if'", "'in'", "'Int'", "'internal'", "'intrinsic'",
"'invert'", "'is'", "'let'", "'mutable'", "'namespace'",
"'new'", "'newtype'", "'not'", "'One'", "'open'", "'operation'",
"'or'", "'Pauli'", "'PauliI'", "'PauliX'", "'PauliY'",
"'PauliZ'", "'Qubit'", "'Range'", "'repeat'", "'Result'",
"'return'", "'self'", "'set'", "'String'", "'true'",
"'Unit'", "'until'", "'using'", "'while'", "'within'",
"'Zero'", "'and='", "'<-'", "'->'", "'*'", "'*='",
"'@'", "'!'", "<INVALID>", "'}'", "'['", "']'", "'^'",
"'^='", "':'", "','", "'$\"'", "'.'", "'::'", "'..'",
"'=='", "<INVALID>", "'...'", "'='", "'=>'", "'>'",
"'>='", "'<'", "'<='", "'-'", "'-='", "'!='", "'or='",
"'('", "')'", "'%'", "'%='", "'|'", "'+'", "'+='",
"'?'", "';'", "'/'", "'/='", "'&&&'", "'&&&='", "'^^^'",
"'^^^='", "'>>>'", "'>>>='", "'<<<'", "'<<<='", "'|||'",
"'|||='", "'~~~'", "'_'", "'w/'", "'w/='" ]
symbolicNames = [ "<INVALID>", "Adj", "AdjointFunctor", "AdjointGenerator",
"And", "Apply", "As", "Auto", "BigInt", "Body", "Bool",
"Borrowing", "ControlledFunctor", "ControlledGenerator",
"Ctl", "Distribute", "Double", "Elif", "Else", "Fail",
"BFalse", "Fixup", "For", "Function", "If", "In",
"Int", "Internal", "Intrinsic", "Invert", "Is", "Let",
"Mutable", "Namespace", "New", "Newtype", "Not", "One",
"Open", "Operation", "Or", "Pauli", "PauliI", "PauliX",
"PauliY", "PauliZ", "Qubit", "Range", "Repeat", "Result",
"Return", "Self", "Set", "String", "BTrue", "Unit",
"Until", "Using", "While", "Within", "Zero", "AndEqual",
"ArrowLeft", "ArrowRight", "Asterisk", "AsteriskEqual",
"At", "Bang", "BraceLeft", "BraceRight", "BracketLeft",
"BracketRight", "Caret", "CaretEqual", "Colon", "Comma",
"DollarQuote", "Dot", "DoubleColon", "DoubleDot",
"DoubleEqual", "DoubleQuote", "Ellipsis", "Equal",
"FatArrowRight", "Greater", "GreaterEqual", "Less",
"LessEqual", "Minus", "MinusEqual", "NotEqual", "OrEqual",
"ParenLeft", "ParenRight", "Percent", "PercentEqual",
"Pipe", "Plus", "PlusEqual", "Question", "Semicolon",
"Slash", "SlashEqual", "TripleAmpersand", "TripleAmpersandEqual",
"TripleCaret", "TripleCaretEqual", "TripleGreater",
"TripleGreaterEqual", "TripleLess", "TripleLessEqual",
"TriplePipe", "TriplePipeEqual", "TripleTilde", "Underscore",
"With", "WithEqual", "IntegerLiteral", "BigIntegerLiteral",
"DoubleLiteral", "Identifier", "IdentifierStart",
"IdentifierContinuation", "TypeParameter", "Whitespace",
"Comment", "Invalid", "StringEscape", "StringText",
"StringDoubleQuote", "InterpStringEscape", "InterpBraceLeft",
"InterpStringText", "InterpDoubleQuote" ]
RULE_program = 0
RULE_target = 1
RULE_namespace = 2
RULE_qualifiedName = 3
RULE_namespaceElement = 4
RULE_openDirective = 5
RULE_attribute = 6
RULE_access = 7
RULE_declarationPrefix = 8
RULE_typeDeclaration = 9
RULE_underlyingType = 10
RULE_typeDeclarationTuple = 11
RULE_typeTupleItem = 12
RULE_namedItem = 13
RULE_callableDeclaration = 14
RULE_typeParameterBinding = 15
RULE_parameterTuple = 16
RULE_parameter = 17
RULE_characteristics = 18
RULE_characteristicsExpression = 19
RULE_callableBody = 20
RULE_specialization = 21
RULE_specializationName = 22
RULE_specializationGenerator = 23
RULE_providedSpecialization = 24
RULE_specializationParameterTuple = 25
RULE_specializationParameter = 26
RULE_type_rule = 27
RULE_arrowType = 28
RULE_statement = 29
RULE_scope = 30
RULE_symbolBinding = 31
RULE_updateOperator = 32
RULE_qubitInitializer = 33
RULE_expression = 34
RULE_boolLiteral = 35
RULE_resultLiteral = 36
RULE_pauliLiteral = 37
RULE_stringContent = 38
RULE_interpStringContent = 39
ruleNames = [ "program", "target", "namespace", "qualifiedName", "namespaceElement",
"openDirective", "attribute", "access", "declarationPrefix",
"typeDeclaration", "underlyingType", "typeDeclarationTuple",
"typeTupleItem", "namedItem", "callableDeclaration",
"typeParameterBinding", "parameterTuple", "parameter",
"characteristics", "characteristicsExpression", "callableBody",
"specialization", "specializationName", "specializationGenerator",
"providedSpecialization", "specializationParameterTuple",
"specializationParameter", "type_rule", "arrowType",
"statement", "scope", "symbolBinding", "updateOperator",
"qubitInitializer", "expression", "boolLiteral", "resultLiteral",
"pauliLiteral", "stringContent", "interpStringContent" ]
EOF = Token.EOF
Adj=1
AdjointFunctor=2
AdjointGenerator=3
And=4
Apply=5
As=6
Auto=7
BigInt=8
Body=9
Bool=10
Borrowing=11
ControlledFunctor=12
ControlledGenerator=13
Ctl=14
Distribute=15
Double=16
Elif=17
Else=18
Fail=19
BFalse=20
Fixup=21
For=22
Function=23
If=24
In=25
Int=26
Internal=27
Intrinsic=28
Invert=29
Is=30
Let=31
Mutable=32
Namespace=33
New=34
Newtype=35
Not=36
One=37
Open=38
Operation=39
Or=40
Pauli=41
PauliI=42
PauliX=43
PauliY=44
PauliZ=45
Qubit=46
Range=47
Repeat=48
Result=49
Return=50
Self=51
Set=52
String=53
BTrue=54
Unit=55
Until=56
Using=57
While=58
Within=59
Zero=60
AndEqual=61
ArrowLeft=62
ArrowRight=63
Asterisk=64
AsteriskEqual=65
At=66
Bang=67
BraceLeft=68
BraceRight=69
BracketLeft=70
BracketRight=71
Caret=72
CaretEqual=73
Colon=74
Comma=75
DollarQuote=76
Dot=77
DoubleColon=78
DoubleDot=79
DoubleEqual=80
DoubleQuote=81
Ellipsis=82
Equal=83
FatArrowRight=84
Greater=85
GreaterEqual=86
Less=87
LessEqual=88
Minus=89
MinusEqual=90
NotEqual=91
OrEqual=92
ParenLeft=93
ParenRight=94
Percent=95
PercentEqual=96
Pipe=97
Plus=98
PlusEqual=99
Question=100
Semicolon=101
Slash=102
SlashEqual=103
TripleAmpersand=104
TripleAmpersandEqual=105
TripleCaret=106
TripleCaretEqual=107
TripleGreater=108
TripleGreaterEqual=109
TripleLess=110
TripleLessEqual=111
TriplePipe=112
TriplePipeEqual=113
TripleTilde=114
Underscore=115
With=116
WithEqual=117
IntegerLiteral=118
BigIntegerLiteral=119
DoubleLiteral=120
Identifier=121
IdentifierStart=122
IdentifierContinuation=123
TypeParameter=124
Whitespace=125
Comment=126
Invalid=127
StringEscape=128
StringText=129
StringDoubleQuote=130
InterpStringEscape=131
InterpBraceLeft=132
InterpStringText=133
InterpDoubleQuote=134
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgramContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EOF(self):
return self.getToken(QSharpParser.EOF, 0)
def target(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.TargetContext)
else:
return self.getTypedRuleContext(QSharpParser.TargetContext,i)
def getRuleIndex(self):
return QSharpParser.RULE_program
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProgram" ):
listener.enterProgram(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProgram" ):
listener.exitProgram(self)
def program(self):
localctx = QSharpParser.ProgramContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_program)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
while ((((_la - 23)) & ~0x3f) == 0 and ((1 << (_la - 23)) & ((1 << (QSharpParser.Function - 23)) | (1 << (QSharpParser.Internal - 23)) | (1 << (QSharpParser.Namespace - 23)) | (1 << (QSharpParser.Newtype - 23)) | (1 << (QSharpParser.Open - 23)) | (1 << (QSharpParser.Operation - 23)) | (1 << (QSharpParser.At - 23)))) != 0):
self.state = 80
self.target()
self.state = 85
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 86
self.match(QSharpParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TargetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def namespace(self):
return self.getTypedRuleContext(QSharpParser.NamespaceContext,0)
def namespaceElement(self):
return self.getTypedRuleContext(QSharpParser.NamespaceElementContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_target
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTarget" ):
listener.enterTarget(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTarget" ):
listener.exitTarget(self)
def target(self):
localctx = QSharpParser.TargetContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_target)
try:
self.state = 90
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Namespace]:
self.enterOuterAlt(localctx, 1)
self.state = 88
self.namespace()
pass
elif token in [QSharpParser.Function, QSharpParser.Internal, QSharpParser.Newtype, QSharpParser.Open, QSharpParser.Operation, QSharpParser.At]:
self.enterOuterAlt(localctx, 2)
self.state = 89
self.namespaceElement()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NamespaceContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.keyword = None # Token
self.name = None # QualifiedNameContext
self.openBrace = None # Token
self._namespaceElement = None # NamespaceElementContext
self.elements = list() # of NamespaceElementContexts
self.closeBrace = None # Token
def Namespace(self):
return self.getToken(QSharpParser.Namespace, 0)
def qualifiedName(self):
return self.getTypedRuleContext(QSharpParser.QualifiedNameContext,0)
def BraceLeft(self):
return self.getToken(QSharpParser.BraceLeft, 0)
def BraceRight(self):
return self.getToken(QSharpParser.BraceRight, 0)
def namespaceElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.NamespaceElementContext)
else:
return self.getTypedRuleContext(QSharpParser.NamespaceElementContext,i)
def getRuleIndex(self):
return QSharpParser.RULE_namespace
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNamespace" ):
listener.enterNamespace(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNamespace" ):
listener.exitNamespace(self)
def namespace(self):
localctx = QSharpParser.NamespaceContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_namespace)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 92
localctx.keyword = self.match(QSharpParser.Namespace)
self.state = 93
localctx.name = self.qualifiedName()
self.state = 94
localctx.openBrace = self.match(QSharpParser.BraceLeft)
self.state = 98
self._errHandler.sync(self)
_la = self._input.LA(1)
while ((((_la - 23)) & ~0x3f) == 0 and ((1 << (_la - 23)) & ((1 << (QSharpParser.Function - 23)) | (1 << (QSharpParser.Internal - 23)) | (1 << (QSharpParser.Newtype - 23)) | (1 << (QSharpParser.Open - 23)) | (1 << (QSharpParser.Operation - 23)) | (1 << (QSharpParser.At - 23)))) != 0):
self.state = 95
localctx._namespaceElement = self.namespaceElement()
localctx.elements.append(localctx._namespaceElement)
self.state = 100
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 101
localctx.closeBrace = self.match(QSharpParser.BraceRight)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QualifiedNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Identifier)
else:
return self.getToken(QSharpParser.Identifier, i)
def Dot(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Dot)
else:
return self.getToken(QSharpParser.Dot, i)
def getRuleIndex(self):
return QSharpParser.RULE_qualifiedName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQualifiedName" ):
listener.enterQualifiedName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQualifiedName" ):
listener.exitQualifiedName(self)
def qualifiedName(self):
localctx = QSharpParser.QualifiedNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_qualifiedName)
try:
self.enterOuterAlt(localctx, 1)
self.state = 103
self.match(QSharpParser.Identifier)
self.state = 108
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 104
self.match(QSharpParser.Dot)
self.state = 105
self.match(QSharpParser.Identifier)
self.state = 110
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,3,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NamespaceElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QSharpParser.RULE_namespaceElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class CallableElementContext(NamespaceElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.NamespaceElementContext
super().__init__(parser)
self.Ccallable = None # CallableDeclarationContext
self.copyFrom(ctx)
def callableDeclaration(self):
return self.getTypedRuleContext(QSharpParser.CallableDeclarationContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallableElement" ):
listener.enterCallableElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallableElement" ):
listener.exitCallableElement(self)
class OpenElementContext(NamespaceElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.NamespaceElementContext
super().__init__(parser)
self.copyFrom(ctx)
def openDirective(self):
return self.getTypedRuleContext(QSharpParser.OpenDirectiveContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOpenElement" ):
listener.enterOpenElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOpenElement" ):
listener.exitOpenElement(self)
class TypeElementContext(NamespaceElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.NamespaceElementContext
super().__init__(parser)
self.copyFrom(ctx)
def typeDeclaration(self):
return self.getTypedRuleContext(QSharpParser.TypeDeclarationContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeElement" ):
listener.enterTypeElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeElement" ):
listener.exitTypeElement(self)
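    # Rule method: namespaceElement — one of openDirective, typeDeclaration, or callableDeclaration.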
def namespaceElement(self):
localctx = QSharpParser.NamespaceElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_namespaceElement)
try:
self.state = 114
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
localctx = QSharpParser.OpenElementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 111
self.openDirective()
pass
elif la_ == 2:
localctx = QSharpParser.TypeElementContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 112
self.typeDeclaration()
pass
elif la_ == 3:
localctx = QSharpParser.CallableElementContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 113
localctx.Ccallable = self.callableDeclaration()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OpenDirectiveContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Open(self):
return self.getToken(QSharpParser.Open, 0)
def qualifiedName(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.QualifiedNameContext)
else:
return self.getTypedRuleContext(QSharpParser.QualifiedNameContext,i)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def As(self):
return self.getToken(QSharpParser.As, 0)
def getRuleIndex(self):
return QSharpParser.RULE_openDirective
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOpenDirective" ):
listener.enterOpenDirective(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOpenDirective" ):
listener.exitOpenDirective(self)
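    # Rule method: openDirective — matches Open qualifiedName (As qualifiedName)? Semicolon.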
def openDirective(self):
localctx = QSharpParser.OpenDirectiveContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_openDirective)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 116
self.match(QSharpParser.Open)
self.state = 117
self.qualifiedName()
self.state = 120
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.As:
self.state = 118
self.match(QSharpParser.As)
self.state = 119
self.qualifiedName()
self.state = 122
self.match(QSharpParser.Semicolon)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AttributeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def At(self):
return self.getToken(QSharpParser.At, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_attribute
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAttribute" ):
listener.enterAttribute(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAttribute" ):
listener.exitAttribute(self)
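    # Rule method: attribute — matches At expression.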
def attribute(self):
localctx = QSharpParser.AttributeContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_attribute)
try:
self.enterOuterAlt(localctx, 1)
self.state = 124
self.match(QSharpParser.At)
self.state = 125
self.expression(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AccessContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Internal(self):
return self.getToken(QSharpParser.Internal, 0)
def getRuleIndex(self):
return QSharpParser.RULE_access
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAccess" ):
listener.enterAccess(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAccess" ):
listener.exitAccess(self)
def access(self):
localctx = QSharpParser.AccessContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_access)
try:
self.enterOuterAlt(localctx, 1)
self.state = 127
self.match(QSharpParser.Internal)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclarationPrefixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def attribute(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.AttributeContext)
else:
return self.getTypedRuleContext(QSharpParser.AttributeContext,i)
def access(self):
return self.getTypedRuleContext(QSharpParser.AccessContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_declarationPrefix
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclarationPrefix" ):
listener.enterDeclarationPrefix(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclarationPrefix" ):
listener.exitDeclarationPrefix(self)
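    # Rule method: declarationPrefix — matches attribute* followed by an optional access modifier (Internal).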
def declarationPrefix(self):
localctx = QSharpParser.DeclarationPrefixContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_declarationPrefix)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 132
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.At:
self.state = 129
self.attribute()
self.state = 134
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 136
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Internal:
self.state = 135
self.access()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def declarationPrefix(self):
return self.getTypedRuleContext(QSharpParser.DeclarationPrefixContext,0)
def Newtype(self):
return self.getToken(QSharpParser.Newtype, 0)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def underlyingType(self):
return self.getTypedRuleContext(QSharpParser.UnderlyingTypeContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def getRuleIndex(self):
return QSharpParser.RULE_typeDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeDeclaration" ):
listener.enterTypeDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeDeclaration" ):
listener.exitTypeDeclaration(self)
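    # Rule method: typeDeclaration — matches declarationPrefix Newtype Identifier Equal underlyingType Semicolon.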
def typeDeclaration(self):
localctx = QSharpParser.TypeDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_typeDeclaration)
try:
self.enterOuterAlt(localctx, 1)
self.state = 138
self.declarationPrefix()
self.state = 139
self.match(QSharpParser.Newtype)
self.state = 140
self.match(QSharpParser.Identifier)
self.state = 141
self.match(QSharpParser.Equal)
self.state = 142
self.underlyingType()
self.state = 143
self.match(QSharpParser.Semicolon)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnderlyingTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def typeDeclarationTuple(self):
return self.getTypedRuleContext(QSharpParser.TypeDeclarationTupleContext,0)
def type_rule(self):
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_underlyingType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnderlyingType" ):
listener.enterUnderlyingType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnderlyingType" ):
listener.exitUnderlyingType(self)
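    # Rule method: underlyingType — either a typeDeclarationTuple or a type_rule.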
def underlyingType(self):
localctx = QSharpParser.UnderlyingTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_underlyingType)
try:
self.state = 147
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,8,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 145
self.typeDeclarationTuple()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 146
self.type_rule(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeDeclarationTupleContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def typeTupleItem(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.TypeTupleItemContext)
else:
return self.getTypedRuleContext(QSharpParser.TypeTupleItemContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def getRuleIndex(self):
return QSharpParser.RULE_typeDeclarationTuple
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeDeclarationTuple" ):
listener.enterTypeDeclarationTuple(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeDeclarationTuple" ):
listener.exitTypeDeclarationTuple(self)
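    # Rule method: typeDeclarationTuple — matches ParenLeft (typeTupleItem (Comma typeTupleItem)*)? ParenRight.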
def typeDeclarationTuple(self):
localctx = QSharpParser.TypeDeclarationTupleContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_typeDeclarationTuple)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 149
self.match(QSharpParser.ParenLeft)
self.state = 158
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.BigInt) | (1 << QSharpParser.Bool) | (1 << QSharpParser.Double) | (1 << QSharpParser.Int) | (1 << QSharpParser.Pauli) | (1 << QSharpParser.Qubit) | (1 << QSharpParser.Range) | (1 << QSharpParser.Result) | (1 << QSharpParser.String) | (1 << QSharpParser.Unit))) != 0) or ((((_la - 93)) & ~0x3f) == 0 and ((1 << (_la - 93)) & ((1 << (QSharpParser.ParenLeft - 93)) | (1 << (QSharpParser.Underscore - 93)) | (1 << (QSharpParser.Identifier - 93)) | (1 << (QSharpParser.TypeParameter - 93)))) != 0):
self.state = 150
self.typeTupleItem()
self.state = 155
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.Comma:
self.state = 151
self.match(QSharpParser.Comma)
self.state = 152
self.typeTupleItem()
self.state = 157
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 160
self.match(QSharpParser.ParenRight)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeTupleItemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def namedItem(self):
return self.getTypedRuleContext(QSharpParser.NamedItemContext,0)
def underlyingType(self):
return self.getTypedRuleContext(QSharpParser.UnderlyingTypeContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_typeTupleItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeTupleItem" ):
listener.enterTypeTupleItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeTupleItem" ):
listener.exitTypeTupleItem(self)
def typeTupleItem(self):
localctx = QSharpParser.TypeTupleItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_typeTupleItem)
try:
self.state = 164
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,11,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 162
self.namedItem()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 163
self.underlyingType()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NamedItemContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def Colon(self):
return self.getToken(QSharpParser.Colon, 0)
def type_rule(self):
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_namedItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNamedItem" ):
listener.enterNamedItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNamedItem" ):
listener.exitNamedItem(self)
def namedItem(self):
localctx = QSharpParser.NamedItemContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_namedItem)
try:
self.enterOuterAlt(localctx, 1)
self.state = 166
self.match(QSharpParser.Identifier)
self.state = 167
self.match(QSharpParser.Colon)
self.state = 168
self.type_rule(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CallableDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.keyword = None # Token
self.name = None # Token
self.colon = None # Token
self.returnType = None # Type_ruleContext
self.body = None # CallableBodyContext
def declarationPrefix(self):
return self.getTypedRuleContext(QSharpParser.DeclarationPrefixContext,0)
def parameterTuple(self):
return self.getTypedRuleContext(QSharpParser.ParameterTupleContext,0)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def Colon(self):
return self.getToken(QSharpParser.Colon, 0)
def type_rule(self):
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,0)
def callableBody(self):
return self.getTypedRuleContext(QSharpParser.CallableBodyContext,0)
def Function(self):
return self.getToken(QSharpParser.Function, 0)
def Operation(self):
return self.getToken(QSharpParser.Operation, 0)
def typeParameterBinding(self):
return self.getTypedRuleContext(QSharpParser.TypeParameterBindingContext,0)
def characteristics(self):
return self.getTypedRuleContext(QSharpParser.CharacteristicsContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_callableDeclaration
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallableDeclaration" ):
listener.enterCallableDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallableDeclaration" ):
listener.exitCallableDeclaration(self)
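    # Rule method: callableDeclaration — matches declarationPrefix (Function | Operation) Identifier
    # typeParameterBinding? parameterTuple Colon type_rule characteristics? callableBody.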
def callableDeclaration(self):
localctx = QSharpParser.CallableDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_callableDeclaration)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 170
self.declarationPrefix()
self.state = 171
localctx.keyword = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==QSharpParser.Function or _la==QSharpParser.Operation):
localctx.keyword = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 172
localctx.name = self.match(QSharpParser.Identifier)
self.state = 174
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Less:
self.state = 173
self.typeParameterBinding()
self.state = 176
self.parameterTuple()
self.state = 177
localctx.colon = self.match(QSharpParser.Colon)
self.state = 178
localctx.returnType = self.type_rule(0)
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Is:
self.state = 179
self.characteristics()
self.state = 182
localctx.body = self.callableBody()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TypeParameterBindingContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Less(self):
return self.getToken(QSharpParser.Less, 0)
def Greater(self):
return self.getToken(QSharpParser.Greater, 0)
def TypeParameter(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.TypeParameter)
else:
return self.getToken(QSharpParser.TypeParameter, i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def getRuleIndex(self):
return QSharpParser.RULE_typeParameterBinding
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeParameterBinding" ):
listener.enterTypeParameterBinding(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeParameterBinding" ):
listener.exitTypeParameterBinding(self)
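    # Rule method: typeParameterBinding — matches Less (TypeParameter (Comma TypeParameter)*)? Greater.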
def typeParameterBinding(self):
localctx = QSharpParser.TypeParameterBindingContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_typeParameterBinding)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 184
self.match(QSharpParser.Less)
self.state = 193
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.TypeParameter:
self.state = 185
self.match(QSharpParser.TypeParameter)
self.state = 190
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.Comma:
self.state = 186
self.match(QSharpParser.Comma)
self.state = 187
self.match(QSharpParser.TypeParameter)
self.state = 192
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 195
self.match(QSharpParser.Greater)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterTupleContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def parameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ParameterContext)
else:
return self.getTypedRuleContext(QSharpParser.ParameterContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def getRuleIndex(self):
return QSharpParser.RULE_parameterTuple
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameterTuple" ):
listener.enterParameterTuple(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameterTuple" ):
listener.exitParameterTuple(self)
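    # Rule method: parameterTuple — matches ParenLeft (parameter (Comma parameter)*)? ParenRight.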
def parameterTuple(self):
localctx = QSharpParser.ParameterTupleContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_parameterTuple)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 197
self.match(QSharpParser.ParenLeft)
self.state = 206
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.ParenLeft or _la==QSharpParser.Identifier:
self.state = 198
self.parameter()
self.state = 203
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.Comma:
self.state = 199
self.match(QSharpParser.Comma)
self.state = 200
self.parameter()
self.state = 205
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 208
self.match(QSharpParser.ParenRight)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def namedItem(self):
return self.getTypedRuleContext(QSharpParser.NamedItemContext,0)
def parameterTuple(self):
return self.getTypedRuleContext(QSharpParser.ParameterTupleContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_parameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParameter" ):
listener.enterParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParameter" ):
listener.exitParameter(self)
def parameter(self):
localctx = QSharpParser.ParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_parameter)
try:
self.state = 212
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Identifier]:
self.enterOuterAlt(localctx, 1)
self.state = 210
self.namedItem()
pass
elif token in [QSharpParser.ParenLeft]:
self.enterOuterAlt(localctx, 2)
self.state = 211
self.parameterTuple()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CharacteristicsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Is(self):
return self.getToken(QSharpParser.Is, 0)
def characteristicsExpression(self):
return self.getTypedRuleContext(QSharpParser.CharacteristicsExpressionContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_characteristics
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCharacteristics" ):
listener.enterCharacteristics(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCharacteristics" ):
listener.exitCharacteristics(self)
def characteristics(self):
localctx = QSharpParser.CharacteristicsContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_characteristics)
try:
self.enterOuterAlt(localctx, 1)
self.state = 214
self.match(QSharpParser.Is)
self.state = 215
self.characteristicsExpression(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CharacteristicsExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Adj(self):
return self.getToken(QSharpParser.Adj, 0)
def Ctl(self):
return self.getToken(QSharpParser.Ctl, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def characteristicsExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.CharacteristicsExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.CharacteristicsExpressionContext,i)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def Asterisk(self):
return self.getToken(QSharpParser.Asterisk, 0)
def Plus(self):
return self.getToken(QSharpParser.Plus, 0)
def getRuleIndex(self):
return QSharpParser.RULE_characteristicsExpression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCharacteristicsExpression" ):
listener.enterCharacteristicsExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCharacteristicsExpression" ):
listener.exitCharacteristicsExpression(self)
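    # Rule method: characteristicsExpression — left-recursive: Adj, Ctl, or a parenthesized
    # characteristicsExpression, combined with Asterisk (binds tighter) and Plus operators.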
def characteristicsExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = QSharpParser.CharacteristicsExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 38
self.enterRecursionRule(localctx, 38, self.RULE_characteristicsExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 224
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Adj]:
self.state = 218
self.match(QSharpParser.Adj)
pass
elif token in [QSharpParser.Ctl]:
self.state = 219
self.match(QSharpParser.Ctl)
pass
elif token in [QSharpParser.ParenLeft]:
self.state = 220
self.match(QSharpParser.ParenLeft)
self.state = 221
self.characteristicsExpression(0)
self.state = 222
self.match(QSharpParser.ParenRight)
pass
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 234
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 232
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
localctx = QSharpParser.CharacteristicsExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_characteristicsExpression)
self.state = 226
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 227
self.match(QSharpParser.Asterisk)
self.state = 228
self.characteristicsExpression(3)
pass
elif la_ == 2:
localctx = QSharpParser.CharacteristicsExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_characteristicsExpression)
self.state = 229
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 230
self.match(QSharpParser.Plus)
self.state = 231
self.characteristicsExpression(2)
pass
self.state = 236
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class CallableBodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def BraceLeft(self):
return self.getToken(QSharpParser.BraceLeft, 0)
def BraceRight(self):
return self.getToken(QSharpParser.BraceRight, 0)
def specialization(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.SpecializationContext)
else:
return self.getTypedRuleContext(QSharpParser.SpecializationContext,i)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_callableBody
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallableBody" ):
listener.enterCallableBody(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallableBody" ):
listener.exitCallableBody(self)
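    # Rule method: callableBody — either BraceLeft specialization* BraceRight or a plain scope.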
def callableBody(self):
localctx = QSharpParser.CallableBodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_callableBody)
self._la = 0 # Token type
try:
self.state = 246
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,23,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 237
self.match(QSharpParser.BraceLeft)
self.state = 241
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointGenerator) | (1 << QSharpParser.Body) | (1 << QSharpParser.ControlledGenerator))) != 0):
self.state = 238
self.specialization()
self.state = 243
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 244
self.match(QSharpParser.BraceRight)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 245
self.scope()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecializationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def specializationGenerator(self):
return self.getTypedRuleContext(QSharpParser.SpecializationGeneratorContext,0)
def specializationName(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.SpecializationNameContext)
else:
return self.getTypedRuleContext(QSharpParser.SpecializationNameContext,i)
def getRuleIndex(self):
return QSharpParser.RULE_specialization
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecialization" ):
listener.enterSpecialization(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecialization" ):
listener.exitSpecialization(self)
def specialization(self):
localctx = QSharpParser.SpecializationContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_specialization)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 249
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 248
self.specializationName()
self.state = 251
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointGenerator) | (1 << QSharpParser.Body) | (1 << QSharpParser.ControlledGenerator))) != 0)):
break
self.state = 253
self.specializationGenerator()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecializationNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Body(self):
return self.getToken(QSharpParser.Body, 0)
def AdjointGenerator(self):
return self.getToken(QSharpParser.AdjointGenerator, 0)
def ControlledGenerator(self):
return self.getToken(QSharpParser.ControlledGenerator, 0)
def getRuleIndex(self):
return QSharpParser.RULE_specializationName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecializationName" ):
listener.enterSpecializationName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecializationName" ):
listener.exitSpecializationName(self)
def specializationName(self):
localctx = QSharpParser.SpecializationNameContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_specializationName)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 255
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointGenerator) | (1 << QSharpParser.Body) | (1 << QSharpParser.ControlledGenerator))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecializationGeneratorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Auto(self):
return self.getToken(QSharpParser.Auto, 0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def Self(self):
return self.getToken(QSharpParser.Self, 0)
def Invert(self):
return self.getToken(QSharpParser.Invert, 0)
def Distribute(self):
return self.getToken(QSharpParser.Distribute, 0)
def Intrinsic(self):
return self.getToken(QSharpParser.Intrinsic, 0)
def providedSpecialization(self):
return self.getTypedRuleContext(QSharpParser.ProvidedSpecializationContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_specializationGenerator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecializationGenerator" ):
listener.enterSpecializationGenerator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecializationGenerator" ):
listener.exitSpecializationGenerator(self)
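    # Rule method: specializationGenerator — Auto, Self, Invert, Distribute, or Intrinsic followed by
    # Semicolon, or a providedSpecialization.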
def specializationGenerator(self):
localctx = QSharpParser.SpecializationGeneratorContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_specializationGenerator)
try:
self.state = 268
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Auto]:
self.enterOuterAlt(localctx, 1)
self.state = 257
self.match(QSharpParser.Auto)
self.state = 258
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.Self]:
self.enterOuterAlt(localctx, 2)
self.state = 259
self.match(QSharpParser.Self)
self.state = 260
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.Invert]:
self.enterOuterAlt(localctx, 3)
self.state = 261
self.match(QSharpParser.Invert)
self.state = 262
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.Distribute]:
self.enterOuterAlt(localctx, 4)
self.state = 263
self.match(QSharpParser.Distribute)
self.state = 264
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.Intrinsic]:
self.enterOuterAlt(localctx, 5)
self.state = 265
self.match(QSharpParser.Intrinsic)
self.state = 266
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.BraceLeft, QSharpParser.ParenLeft]:
self.enterOuterAlt(localctx, 6)
self.state = 267
self.providedSpecialization()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ProvidedSpecializationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def specializationParameterTuple(self):
return self.getTypedRuleContext(QSharpParser.SpecializationParameterTupleContext,0)
def getRuleIndex(self):
return QSharpParser.RULE_providedSpecialization
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProvidedSpecialization" ):
listener.enterProvidedSpecialization(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProvidedSpecialization" ):
listener.exitProvidedSpecialization(self)
def providedSpecialization(self):
localctx = QSharpParser.ProvidedSpecializationContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_providedSpecialization)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 271
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.ParenLeft:
self.state = 270
self.specializationParameterTuple()
self.state = 273
self.scope()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecializationParameterTupleContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def specializationParameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.SpecializationParameterContext)
else:
return self.getTypedRuleContext(QSharpParser.SpecializationParameterContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def getRuleIndex(self):
return QSharpParser.RULE_specializationParameterTuple
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecializationParameterTuple" ):
listener.enterSpecializationParameterTuple(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecializationParameterTuple" ):
listener.exitSpecializationParameterTuple(self)
def specializationParameterTuple(self):
localctx = QSharpParser.SpecializationParameterTupleContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_specializationParameterTuple)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 275
self.match(QSharpParser.ParenLeft)
self.state = 284
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Ellipsis or _la==QSharpParser.Identifier:
self.state = 276
self.specializationParameter()
self.state = 281
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.Comma:
self.state = 277
self.match(QSharpParser.Comma)
self.state = 278
self.specializationParameter()
self.state = 283
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 286
self.match(QSharpParser.ParenRight)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SpecializationParameterContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def Ellipsis(self):
return self.getToken(QSharpParser.Ellipsis, 0)
def getRuleIndex(self):
return QSharpParser.RULE_specializationParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSpecializationParameter" ):
listener.enterSpecializationParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSpecializationParameter" ):
listener.exitSpecializationParameter(self)
def specializationParameter(self):
localctx = QSharpParser.SpecializationParameterContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_specializationParameter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 288
_la = self._input.LA(1)
if not(_la==QSharpParser.Ellipsis or _la==QSharpParser.Identifier):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Type_ruleContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.name = None # QualifiedNameContext
def Underscore(self):
return self.getToken(QSharpParser.Underscore, 0)
def TypeParameter(self):
return self.getToken(QSharpParser.TypeParameter, 0)
def BigInt(self):
return self.getToken(QSharpParser.BigInt, 0)
def Bool(self):
return self.getToken(QSharpParser.Bool, 0)
def Double(self):
return self.getToken(QSharpParser.Double, 0)
def Int(self):
return self.getToken(QSharpParser.Int, 0)
def Pauli(self):
return self.getToken(QSharpParser.Pauli, 0)
def Qubit(self):
return self.getToken(QSharpParser.Qubit, 0)
def Range(self):
return self.getToken(QSharpParser.Range, 0)
def Result(self):
return self.getToken(QSharpParser.Result, 0)
def String(self):
return self.getToken(QSharpParser.String, 0)
def Unit(self):
return self.getToken(QSharpParser.Unit, 0)
def qualifiedName(self):
return self.getTypedRuleContext(QSharpParser.QualifiedNameContext,0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def type_rule(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.Type_ruleContext)
else:
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def arrowType(self):
return self.getTypedRuleContext(QSharpParser.ArrowTypeContext,0)
def characteristics(self):
return self.getTypedRuleContext(QSharpParser.CharacteristicsContext,0)
def BracketLeft(self):
return self.getToken(QSharpParser.BracketLeft, 0)
def BracketRight(self):
return self.getToken(QSharpParser.BracketRight, 0)
def getRuleIndex(self):
return QSharpParser.RULE_type_rule
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterType_rule" ):
listener.enterType_rule(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitType_rule" ):
listener.exitType_rule(self)
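    # Rule method: type_rule — left-recursive: Underscore, TypeParameter, a primitive type token, a
    # qualifiedName, a tuple type, or a parenthesized arrowType with optional characteristics; a trailing
    # BracketLeft BracketRight denotes an array type.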
def type_rule(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = QSharpParser.Type_ruleContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 54
self.enterRecursionRule(localctx, 54, self.RULE_type_rule, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 326
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,33,self._ctx)
if la_ == 1:
self.state = 291
self.match(QSharpParser.Underscore)
pass
elif la_ == 2:
self.state = 292
self.match(QSharpParser.TypeParameter)
pass
elif la_ == 3:
self.state = 293
self.match(QSharpParser.BigInt)
pass
elif la_ == 4:
self.state = 294
self.match(QSharpParser.Bool)
pass
elif la_ == 5:
self.state = 295
self.match(QSharpParser.Double)
pass
elif la_ == 6:
self.state = 296
self.match(QSharpParser.Int)
pass
elif la_ == 7:
self.state = 297
self.match(QSharpParser.Pauli)
pass
elif la_ == 8:
self.state = 298
self.match(QSharpParser.Qubit)
pass
elif la_ == 9:
self.state = 299
self.match(QSharpParser.Range)
pass
elif la_ == 10:
self.state = 300
self.match(QSharpParser.Result)
pass
elif la_ == 11:
self.state = 301
self.match(QSharpParser.String)
pass
elif la_ == 12:
self.state = 302
self.match(QSharpParser.Unit)
pass
elif la_ == 13:
self.state = 303
localctx.name = self.qualifiedName()
pass
elif la_ == 14:
self.state = 304
self.match(QSharpParser.ParenLeft)
self.state = 316
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.BigInt) | (1 << QSharpParser.Bool) | (1 << QSharpParser.Double) | (1 << QSharpParser.Int) | (1 << QSharpParser.Pauli) | (1 << QSharpParser.Qubit) | (1 << QSharpParser.Range) | (1 << QSharpParser.Result) | (1 << QSharpParser.String) | (1 << QSharpParser.Unit))) != 0) or ((((_la - 93)) & ~0x3f) == 0 and ((1 << (_la - 93)) & ((1 << (QSharpParser.ParenLeft - 93)) | (1 << (QSharpParser.Underscore - 93)) | (1 << (QSharpParser.Identifier - 93)) | (1 << (QSharpParser.TypeParameter - 93)))) != 0):
self.state = 305
self.type_rule(0)
self.state = 310
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 306
self.match(QSharpParser.Comma)
self.state = 307
self.type_rule(0)
self.state = 312
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,29,self._ctx)
self.state = 314
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 313
self.match(QSharpParser.Comma)
self.state = 318
self.match(QSharpParser.ParenRight)
pass
elif la_ == 15:
self.state = 319
self.match(QSharpParser.ParenLeft)
self.state = 320
self.arrowType()
self.state = 322
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Is:
self.state = 321
self.characteristics()
self.state = 324
self.match(QSharpParser.ParenRight)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 333
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,34,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = QSharpParser.Type_ruleContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_type_rule)
self.state = 328
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 329
self.match(QSharpParser.BracketLeft)
self.state = 330
self.match(QSharpParser.BracketRight)
self.state = 335
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,34,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ArrowTypeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def type_rule(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.Type_ruleContext)
else:
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,i)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def ArrowRight(self):
return self.getToken(QSharpParser.ArrowRight, 0)
def FatArrowRight(self):
return self.getToken(QSharpParser.FatArrowRight, 0)
def getRuleIndex(self):
return QSharpParser.RULE_arrowType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArrowType" ):
listener.enterArrowType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArrowType" ):
listener.exitArrowType(self)
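    # Rule method: arrowType — type_rule (ArrowRight | FatArrowRight) type_rule, optionally parenthesized.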
def arrowType(self):
localctx = QSharpParser.ArrowTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_arrowType)
self._la = 0 # Token type
try:
self.state = 346
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,35,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 336
self.match(QSharpParser.ParenLeft)
self.state = 337
self.type_rule(0)
self.state = 338
_la = self._input.LA(1)
if not(_la==QSharpParser.ArrowRight or _la==QSharpParser.FatArrowRight):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 339
self.type_rule(0)
self.state = 340
self.match(QSharpParser.ParenRight)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 342
self.type_rule(0)
self.state = 343
_la = self._input.LA(1)
if not(_la==QSharpParser.ArrowRight or _la==QSharpParser.FatArrowRight):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 344
self.type_rule(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QSharpParser.RULE_statement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class MutableStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Mutable(self):
return self.getToken(QSharpParser.Mutable, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMutableStatement" ):
listener.enterMutableStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMutableStatement" ):
listener.exitMutableStatement(self)
class FailStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Fail(self):
return self.getToken(QSharpParser.Fail, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFailStatement" ):
listener.enterFailStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFailStatement" ):
listener.exitFailStatement(self)
class SetUpdateStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Set(self):
return self.getToken(QSharpParser.Set, 0)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def updateOperator(self):
return self.getTypedRuleContext(QSharpParser.UpdateOperatorContext,0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetUpdateStatement" ):
listener.enterSetUpdateStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetUpdateStatement" ):
listener.exitSetUpdateStatement(self)
class WhileStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def While(self):
return self.getToken(QSharpParser.While, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWhileStatement" ):
listener.enterWhileStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWhileStatement" ):
listener.exitWhileStatement(self)
class SetStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Set(self):
return self.getToken(QSharpParser.Set, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetStatement" ):
listener.enterSetStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetStatement" ):
listener.exitSetStatement(self)
class UntilStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Until(self):
return self.getToken(QSharpParser.Until, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def Fixup(self):
return self.getToken(QSharpParser.Fixup, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUntilStatement" ):
listener.enterUntilStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUntilStatement" ):
listener.exitUntilStatement(self)
class IfStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def If(self):
return self.getToken(QSharpParser.If, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIfStatement" ):
listener.enterIfStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIfStatement" ):
listener.exitIfStatement(self)
class BorrowingStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Borrowing(self):
return self.getToken(QSharpParser.Borrowing, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def qubitInitializer(self):
return self.getTypedRuleContext(QSharpParser.QubitInitializerContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBorrowingStatement" ):
listener.enterBorrowingStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBorrowingStatement" ):
listener.exitBorrowingStatement(self)
class ExpressionStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpressionStatement" ):
listener.enterExpressionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpressionStatement" ):
listener.exitExpressionStatement(self)
class ElifStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Elif(self):
return self.getToken(QSharpParser.Elif, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElifStatement" ):
listener.enterElifStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElifStatement" ):
listener.exitElifStatement(self)
class ElseStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Else(self):
return self.getToken(QSharpParser.Else, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElseStatement" ):
listener.enterElseStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElseStatement" ):
listener.exitElseStatement(self)
class ApplyStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Apply(self):
return self.getToken(QSharpParser.Apply, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterApplyStatement" ):
listener.enterApplyStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitApplyStatement" ):
listener.exitApplyStatement(self)
class ReturnStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.return_statement = None # Token
self.value = None # ExpressionContext
self.semicolon = None # Token
self.copyFrom(ctx)
def Return(self):
return self.getToken(QSharpParser.Return, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterReturnStatement" ):
listener.enterReturnStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitReturnStatement" ):
listener.exitReturnStatement(self)
class SetWithStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Set(self):
return self.getToken(QSharpParser.Set, 0)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def WithEqual(self):
return self.getToken(QSharpParser.WithEqual, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def ArrowLeft(self):
return self.getToken(QSharpParser.ArrowLeft, 0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetWithStatement" ):
listener.enterSetWithStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetWithStatement" ):
listener.exitSetWithStatement(self)
class LetStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.let = None # Token
self.binding = None # SymbolBindingContext
self.equals = None # Token
self.value = None # ExpressionContext
self.semicolon = None # Token
self.copyFrom(ctx)
def Let(self):
return self.getToken(QSharpParser.Let, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Semicolon(self):
return self.getToken(QSharpParser.Semicolon, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLetStatement" ):
listener.enterLetStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLetStatement" ):
listener.exitLetStatement(self)
class ForStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def For(self):
return self.getToken(QSharpParser.For, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def In(self):
return self.getToken(QSharpParser.In, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterForStatement" ):
listener.enterForStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitForStatement" ):
listener.exitForStatement(self)
class WithinStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Within(self):
return self.getToken(QSharpParser.Within, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWithinStatement" ):
listener.enterWithinStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWithinStatement" ):
listener.exitWithinStatement(self)
class UsingStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Using(self):
return self.getToken(QSharpParser.Using, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def symbolBinding(self):
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,0)
def Equal(self):
return self.getToken(QSharpParser.Equal, 0)
def qubitInitializer(self):
return self.getTypedRuleContext(QSharpParser.QubitInitializerContext,0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUsingStatement" ):
listener.enterUsingStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUsingStatement" ):
listener.exitUsingStatement(self)
class RepeatStatementContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def Repeat(self):
return self.getToken(QSharpParser.Repeat, 0)
def scope(self):
return self.getTypedRuleContext(QSharpParser.ScopeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRepeatStatement" ):
listener.enterRepeatStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRepeatStatement" ):
listener.exitRepeatStatement(self)
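# Rule method for 'statement': adaptivePredict selects among 19 labeled alternatives
# (expression, return, fail, let, mutable, the set/set-update/set-with variants,
# if/elif/else, for, while, repeat, until with optional fixup, within, apply,
# using, borrowing), each building the matching *StatementContext subclass above.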
def statement(self):
localctx = QSharpParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_statement)
try:
self.state = 450
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,37,self._ctx)
if la_ == 1:
localctx = QSharpParser.ExpressionStatementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 348
self.expression(0)
self.state = 349
self.match(QSharpParser.Semicolon)
pass
elif la_ == 2:
localctx = QSharpParser.ReturnStatementContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 351
localctx.return_statement = self.match(QSharpParser.Return)
self.state = 352
localctx.value = self.expression(0)
self.state = 353
localctx.semicolon = self.match(QSharpParser.Semicolon)
pass
elif la_ == 3:
localctx = QSharpParser.FailStatementContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 355
self.match(QSharpParser.Fail)
self.state = 356
self.expression(0)
self.state = 357
self.match(QSharpParser.Semicolon)
pass
elif la_ == 4:
localctx = QSharpParser.LetStatementContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 359
localctx.let = self.match(QSharpParser.Let)
self.state = 360
localctx.binding = self.symbolBinding()
self.state = 361
localctx.equals = self.match(QSharpParser.Equal)
self.state = 362
localctx.value = self.expression(0)
self.state = 363
localctx.semicolon = self.match(QSharpParser.Semicolon)
pass
elif la_ == 5:
localctx = QSharpParser.MutableStatementContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 365
self.match(QSharpParser.Mutable)
self.state = 366
self.symbolBinding()
self.state = 367
self.match(QSharpParser.Equal)
self.state = 368
self.expression(0)
self.state = 369
self.match(QSharpParser.Semicolon)
pass
elif la_ == 6:
localctx = QSharpParser.SetStatementContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 371
self.match(QSharpParser.Set)
self.state = 372
self.symbolBinding()
self.state = 373
self.match(QSharpParser.Equal)
self.state = 374
self.expression(0)
self.state = 375
self.match(QSharpParser.Semicolon)
pass
elif la_ == 7:
localctx = QSharpParser.SetUpdateStatementContext(self, localctx)
self.enterOuterAlt(localctx, 7)
self.state = 377
self.match(QSharpParser.Set)
self.state = 378
self.match(QSharpParser.Identifier)
self.state = 379
self.updateOperator()
self.state = 380
self.expression(0)
self.state = 381
self.match(QSharpParser.Semicolon)
pass
elif la_ == 8:
localctx = QSharpParser.SetWithStatementContext(self, localctx)
self.enterOuterAlt(localctx, 8)
self.state = 383
self.match(QSharpParser.Set)
self.state = 384
self.match(QSharpParser.Identifier)
self.state = 385
self.match(QSharpParser.WithEqual)
self.state = 386
self.expression(0)
self.state = 387
self.match(QSharpParser.ArrowLeft)
self.state = 388
self.expression(0)
self.state = 389
self.match(QSharpParser.Semicolon)
pass
elif la_ == 9:
localctx = QSharpParser.IfStatementContext(self, localctx)
self.enterOuterAlt(localctx, 9)
self.state = 391
self.match(QSharpParser.If)
self.state = 392
self.match(QSharpParser.ParenLeft)
self.state = 393
self.expression(0)
self.state = 394
self.match(QSharpParser.ParenRight)
self.state = 395
self.scope()
pass
elif la_ == 10:
localctx = QSharpParser.ElifStatementContext(self, localctx)
self.enterOuterAlt(localctx, 10)
self.state = 397
self.match(QSharpParser.Elif)
self.state = 398
self.match(QSharpParser.ParenLeft)
self.state = 399
self.expression(0)
self.state = 400
self.match(QSharpParser.ParenRight)
self.state = 401
self.scope()
pass
elif la_ == 11:
localctx = QSharpParser.ElseStatementContext(self, localctx)
self.enterOuterAlt(localctx, 11)
self.state = 403
self.match(QSharpParser.Else)
self.state = 404
self.scope()
pass
elif la_ == 12:
localctx = QSharpParser.ForStatementContext(self, localctx)
self.enterOuterAlt(localctx, 12)
self.state = 405
self.match(QSharpParser.For)
self.state = 406
self.match(QSharpParser.ParenLeft)
self.state = 407
self.symbolBinding()
self.state = 408
self.match(QSharpParser.In)
self.state = 409
self.expression(0)
self.state = 410
self.match(QSharpParser.ParenRight)
self.state = 411
self.scope()
pass
elif la_ == 13:
localctx = QSharpParser.WhileStatementContext(self, localctx)
self.enterOuterAlt(localctx, 13)
self.state = 413
self.match(QSharpParser.While)
self.state = 414
self.match(QSharpParser.ParenLeft)
self.state = 415
self.expression(0)
self.state = 416
self.match(QSharpParser.ParenRight)
self.state = 417
self.scope()
pass
elif la_ == 14:
localctx = QSharpParser.RepeatStatementContext(self, localctx)
self.enterOuterAlt(localctx, 14)
self.state = 419
self.match(QSharpParser.Repeat)
self.state = 420
self.scope()
pass
elif la_ == 15:
localctx = QSharpParser.UntilStatementContext(self, localctx)
self.enterOuterAlt(localctx, 15)
self.state = 421
self.match(QSharpParser.Until)
self.state = 422
self.match(QSharpParser.ParenLeft)
self.state = 423
self.expression(0)
self.state = 424
self.match(QSharpParser.ParenRight)
self.state = 428
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Semicolon]:
self.state = 425
self.match(QSharpParser.Semicolon)
pass
elif token in [QSharpParser.Fixup]:
self.state = 426
self.match(QSharpParser.Fixup)
self.state = 427
self.scope()
pass
else:
raise NoViableAltException(self)
pass
elif la_ == 16:
localctx = QSharpParser.WithinStatementContext(self, localctx)
self.enterOuterAlt(localctx, 16)
self.state = 430
self.match(QSharpParser.Within)
self.state = 431
self.scope()
pass
elif la_ == 17:
localctx = QSharpParser.ApplyStatementContext(self, localctx)
self.enterOuterAlt(localctx, 17)
self.state = 432
self.match(QSharpParser.Apply)
self.state = 433
self.scope()
pass
elif la_ == 18:
localctx = QSharpParser.UsingStatementContext(self, localctx)
self.enterOuterAlt(localctx, 18)
self.state = 434
self.match(QSharpParser.Using)
self.state = 435
self.match(QSharpParser.ParenLeft)
self.state = 436
self.symbolBinding()
self.state = 437
self.match(QSharpParser.Equal)
self.state = 438
self.qubitInitializer()
self.state = 439
self.match(QSharpParser.ParenRight)
self.state = 440
self.scope()
pass
elif la_ == 19:
localctx = QSharpParser.BorrowingStatementContext(self, localctx)
self.enterOuterAlt(localctx, 19)
self.state = 442
self.match(QSharpParser.Borrowing)
self.state = 443
self.match(QSharpParser.ParenLeft)
self.state = 444
self.symbolBinding()
self.state = 445
self.match(QSharpParser.Equal)
self.state = 446
self.qubitInitializer()
self.state = 447
self.match(QSharpParser.ParenRight)
self.state = 448
self.scope()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ScopeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.openBrace = None # Token
self._statement = None # StatementContext
self.statements = list() # of StatementContexts
self.closeBrace = None # Token
def BraceLeft(self):
return self.getToken(QSharpParser.BraceLeft, 0)
def BraceRight(self):
return self.getToken(QSharpParser.BraceRight, 0)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.StatementContext)
else:
return self.getTypedRuleContext(QSharpParser.StatementContext,i)
def getRuleIndex(self):
return QSharpParser.RULE_scope
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterScope" ):
listener.enterScope(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitScope" ):
listener.exitScope(self)
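# Rule method for 'scope': matches BraceLeft, then zero or more statements collected
# into localctx.statements, then BraceRight. The long bitmask condition tests whether
# the lookahead token can begin a statement.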
def scope(self):
localctx = QSharpParser.ScopeContext(self, self._ctx, self.state)
self.enterRule(localctx, 60, self.RULE_scope)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 452
localctx.openBrace = self.match(QSharpParser.BraceLeft)
self.state = 456
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointFunctor) | (1 << QSharpParser.Apply) | (1 << QSharpParser.Borrowing) | (1 << QSharpParser.ControlledFunctor) | (1 << QSharpParser.Elif) | (1 << QSharpParser.Else) | (1 << QSharpParser.Fail) | (1 << QSharpParser.BFalse) | (1 << QSharpParser.For) | (1 << QSharpParser.If) | (1 << QSharpParser.Let) | (1 << QSharpParser.Mutable) | (1 << QSharpParser.New) | (1 << QSharpParser.Not) | (1 << QSharpParser.One) | (1 << QSharpParser.PauliI) | (1 << QSharpParser.PauliX) | (1 << QSharpParser.PauliY) | (1 << QSharpParser.PauliZ) | (1 << QSharpParser.Repeat) | (1 << QSharpParser.Return) | (1 << QSharpParser.Set) | (1 << QSharpParser.BTrue) | (1 << QSharpParser.Until) | (1 << QSharpParser.Using) | (1 << QSharpParser.While) | (1 << QSharpParser.Within) | (1 << QSharpParser.Zero))) != 0) or ((((_la - 70)) & ~0x3f) == 0 and ((1 << (_la - 70)) & ((1 << (QSharpParser.BracketLeft - 70)) | (1 << (QSharpParser.DollarQuote - 70)) | (1 << (QSharpParser.DoubleQuote - 70)) | (1 << (QSharpParser.Ellipsis - 70)) | (1 << (QSharpParser.Minus - 70)) | (1 << (QSharpParser.ParenLeft - 70)) | (1 << (QSharpParser.TripleTilde - 70)) | (1 << (QSharpParser.Underscore - 70)) | (1 << (QSharpParser.IntegerLiteral - 70)) | (1 << (QSharpParser.BigIntegerLiteral - 70)) | (1 << (QSharpParser.DoubleLiteral - 70)) | (1 << (QSharpParser.Identifier - 70)))) != 0):
self.state = 453
localctx._statement = self.statement()
localctx.statements.append(localctx._statement)
self.state = 458
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 459
localctx.closeBrace = self.match(QSharpParser.BraceRight)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SymbolBindingContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QSharpParser.RULE_symbolBinding
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SymbolNameContext(SymbolBindingContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.SymbolBindingContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSymbolName" ):
listener.enterSymbolName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSymbolName" ):
listener.exitSymbolName(self)
class SymbolTupleContext(SymbolBindingContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.SymbolBindingContext
super().__init__(parser)
self._symbolBinding = None # SymbolBindingContext
self.bindings = list() # of SymbolBindingContexts
self.copyFrom(ctx)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def symbolBinding(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.SymbolBindingContext)
else:
return self.getTypedRuleContext(QSharpParser.SymbolBindingContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSymbolTuple" ):
listener.enterSymbolTuple(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSymbolTuple" ):
listener.exitSymbolTuple(self)
class DiscardSymbolContext(SymbolBindingContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.SymbolBindingContext
super().__init__(parser)
self.copyFrom(ctx)
def Underscore(self):
return self.getToken(QSharpParser.Underscore, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDiscardSymbol" ):
listener.enterDiscardSymbol(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDiscardSymbol" ):
listener.exitDiscardSymbol(self)
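# Rule method for 'symbolBinding': either a discard (Underscore), a single Identifier,
# or a parenthesized, comma-separated tuple of nested bindings with an optional
# trailing comma.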
def symbolBinding(self):
localctx = QSharpParser.SymbolBindingContext(self, self._ctx, self.state)
self.enterRule(localctx, 62, self.RULE_symbolBinding)
self._la = 0 # Token type
try:
self.state = 478
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.Underscore]:
localctx = QSharpParser.DiscardSymbolContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 461
self.match(QSharpParser.Underscore)
pass
elif token in [QSharpParser.Identifier]:
localctx = QSharpParser.SymbolNameContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 462
localctx.name = self.match(QSharpParser.Identifier)
pass
elif token in [QSharpParser.ParenLeft]:
localctx = QSharpParser.SymbolTupleContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 463
self.match(QSharpParser.ParenLeft)
self.state = 475
self._errHandler.sync(self)
_la = self._input.LA(1)
if ((((_la - 93)) & ~0x3f) == 0 and ((1 << (_la - 93)) & ((1 << (QSharpParser.ParenLeft - 93)) | (1 << (QSharpParser.Underscore - 93)) | (1 << (QSharpParser.Identifier - 93)))) != 0):
self.state = 464
localctx._symbolBinding = self.symbolBinding()
localctx.bindings.append(localctx._symbolBinding)
self.state = 469
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,39,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 465
self.match(QSharpParser.Comma)
self.state = 466
localctx._symbolBinding = self.symbolBinding()
localctx.bindings.append(localctx._symbolBinding)
self.state = 471
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,39,self._ctx)
self.state = 473
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 472
self.match(QSharpParser.Comma)
self.state = 477
self.match(QSharpParser.ParenRight)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UpdateOperatorContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CaretEqual(self):
return self.getToken(QSharpParser.CaretEqual, 0)
def AsteriskEqual(self):
return self.getToken(QSharpParser.AsteriskEqual, 0)
def SlashEqual(self):
return self.getToken(QSharpParser.SlashEqual, 0)
def PercentEqual(self):
return self.getToken(QSharpParser.PercentEqual, 0)
def PlusEqual(self):
return self.getToken(QSharpParser.PlusEqual, 0)
def MinusEqual(self):
return self.getToken(QSharpParser.MinusEqual, 0)
def TripleGreaterEqual(self):
return self.getToken(QSharpParser.TripleGreaterEqual, 0)
def TripleLessEqual(self):
return self.getToken(QSharpParser.TripleLessEqual, 0)
def TripleAmpersandEqual(self):
return self.getToken(QSharpParser.TripleAmpersandEqual, 0)
def TripleCaretEqual(self):
return self.getToken(QSharpParser.TripleCaretEqual, 0)
def TriplePipeEqual(self):
return self.getToken(QSharpParser.TriplePipeEqual, 0)
def AndEqual(self):
return self.getToken(QSharpParser.AndEqual, 0)
def OrEqual(self):
return self.getToken(QSharpParser.OrEqual, 0)
def getRuleIndex(self):
return QSharpParser.RULE_updateOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUpdateOperator" ):
listener.enterUpdateOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUpdateOperator" ):
listener.exitUpdateOperator(self)
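# Rule method for 'updateOperator': consumes exactly one compound-assignment token
# from the set tested below (CaretEqual, AsteriskEqual, SlashEqual, PercentEqual,
# PlusEqual, MinusEqual, the triple shift/bitwise *Equal tokens, AndEqual, OrEqual).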
def updateOperator(self):
localctx = QSharpParser.UpdateOperatorContext(self, self._ctx, self.state)
self.enterRule(localctx, 64, self.RULE_updateOperator)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 480
_la = self._input.LA(1)
if not(((((_la - 61)) & ~0x3f) == 0 and ((1 << (_la - 61)) & ((1 << (QSharpParser.AndEqual - 61)) | (1 << (QSharpParser.AsteriskEqual - 61)) | (1 << (QSharpParser.CaretEqual - 61)) | (1 << (QSharpParser.MinusEqual - 61)) | (1 << (QSharpParser.OrEqual - 61)) | (1 << (QSharpParser.PercentEqual - 61)) | (1 << (QSharpParser.PlusEqual - 61)) | (1 << (QSharpParser.SlashEqual - 61)) | (1 << (QSharpParser.TripleAmpersandEqual - 61)) | (1 << (QSharpParser.TripleCaretEqual - 61)) | (1 << (QSharpParser.TripleGreaterEqual - 61)) | (1 << (QSharpParser.TripleLessEqual - 61)) | (1 << (QSharpParser.TriplePipeEqual - 61)))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QubitInitializerContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Qubit(self):
return self.getToken(QSharpParser.Qubit, 0)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def BracketLeft(self):
return self.getToken(QSharpParser.BracketLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def BracketRight(self):
return self.getToken(QSharpParser.BracketRight, 0)
def qubitInitializer(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.QubitInitializerContext)
else:
return self.getTypedRuleContext(QSharpParser.QubitInitializerContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def getRuleIndex(self):
return QSharpParser.RULE_qubitInitializer
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQubitInitializer" ):
listener.enterQubitInitializer(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQubitInitializer" ):
listener.exitQubitInitializer(self)
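# Rule method for 'qubitInitializer': Qubit followed by '()' for a single qubit,
# Qubit followed by '[expression]' for a qubit array, or a parenthesized,
# comma-separated tuple of nested initializers with an optional trailing comma.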
def qubitInitializer(self):
localctx = QSharpParser.QubitInitializerContext(self, self._ctx, self.state)
self.enterRule(localctx, 66, self.RULE_qubitInitializer)
self._la = 0 # Token type
try:
self.state = 505
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,46,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 482
self.match(QSharpParser.Qubit)
self.state = 483
self.match(QSharpParser.ParenLeft)
self.state = 484
self.match(QSharpParser.ParenRight)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 485
self.match(QSharpParser.Qubit)
self.state = 486
self.match(QSharpParser.BracketLeft)
self.state = 487
self.expression(0)
self.state = 488
self.match(QSharpParser.BracketRight)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 490
self.match(QSharpParser.ParenLeft)
self.state = 502
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Qubit or _la==QSharpParser.ParenLeft:
self.state = 491
self.qubitInitializer()
self.state = 496
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,43,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 492
self.match(QSharpParser.Comma)
self.state = 493
self.qubitInitializer()
self.state = 498
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,43,self._ctx)
self.state = 500
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 499
self.match(QSharpParser.Comma)
self.state = 504
self.match(QSharpParser.ParenRight)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return QSharpParser.RULE_expression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ItemAccessExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def DoubleColon(self):
return self.getToken(QSharpParser.DoubleColon, 0)
def Identifier(self):
return self.getToken(QSharpParser.Identifier, 0)
def BracketLeft(self):
return self.getToken(QSharpParser.BracketLeft, 0)
def BracketRight(self):
return self.getToken(QSharpParser.BracketRight, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterItemAccessExpression" ):
listener.enterItemAccessExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitItemAccessExpression" ):
listener.exitItemAccessExpression(self)
class BitwiseXorExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def TripleCaret(self):
return self.getToken(QSharpParser.TripleCaret, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBitwiseXorExpression" ):
listener.enterBitwiseXorExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBitwiseXorExpression" ):
listener.exitBitwiseXorExpression(self)
class DoubleExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def DoubleLiteral(self):
return self.getToken(QSharpParser.DoubleLiteral, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDoubleExpression" ):
listener.enterDoubleExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDoubleExpression" ):
listener.exitDoubleExpression(self)
class TupleExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.openParen = None # Token
self._expression = None # ExpressionContext
self.items = list() # of ExpressionContexts
self.s75 = None # Token
self.commas = list() # of Tokens
self.closeParen = None # Token
self.copyFrom(ctx)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTupleExpression" ):
listener.enterTupleExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTupleExpression" ):
listener.exitTupleExpression(self)
class RangeExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def DoubleDot(self):
return self.getToken(QSharpParser.DoubleDot, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRangeExpression" ):
listener.enterRangeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRangeExpression" ):
listener.exitRangeExpression(self)
class CompareExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Greater(self):
return self.getToken(QSharpParser.Greater, 0)
def Less(self):
return self.getToken(QSharpParser.Less, 0)
def GreaterEqual(self):
return self.getToken(QSharpParser.GreaterEqual, 0)
def LessEqual(self):
return self.getToken(QSharpParser.LessEqual, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCompareExpression" ):
listener.enterCompareExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCompareExpression" ):
listener.exitCompareExpression(self)
class OrExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Or(self):
return self.getToken(QSharpParser.Or, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOrExpression" ):
listener.enterOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOrExpression" ):
listener.exitOrExpression(self)
class InterpStringExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def DollarQuote(self):
return self.getToken(QSharpParser.DollarQuote, 0)
def InterpDoubleQuote(self):
return self.getToken(QSharpParser.InterpDoubleQuote, 0)
def interpStringContent(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.InterpStringContentContext)
else:
return self.getTypedRuleContext(QSharpParser.InterpStringContentContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInterpStringExpression" ):
listener.enterInterpStringExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInterpStringExpression" ):
listener.exitInterpStringExpression(self)
class BoolExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def boolLiteral(self):
return self.getTypedRuleContext(QSharpParser.BoolLiteralContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBoolExpression" ):
listener.enterBoolExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBoolExpression" ):
listener.exitBoolExpression(self)
class OpenRangeExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def Ellipsis(self):
return self.getToken(QSharpParser.Ellipsis, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOpenRangeExpression" ):
listener.enterOpenRangeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOpenRangeExpression" ):
listener.exitOpenRangeExpression(self)
class AndExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def And(self):
return self.getToken(QSharpParser.And, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndExpression" ):
listener.enterAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndExpression" ):
listener.exitAndExpression(self)
class ResultExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def resultLiteral(self):
return self.getTypedRuleContext(QSharpParser.ResultLiteralContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterResultExpression" ):
listener.enterResultExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitResultExpression" ):
listener.exitResultExpression(self)
class NegationExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Minus(self):
return self.getToken(QSharpParser.Minus, 0)
def Not(self):
return self.getToken(QSharpParser.Not, 0)
def TripleTilde(self):
return self.getToken(QSharpParser.TripleTilde, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNegationExpression" ):
listener.enterNegationExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNegationExpression" ):
listener.exitNegationExpression(self)
class UpdateExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.record = None # ExpressionContext
self.with_slash = None # Token
self.item = None # ExpressionContext
self.arrow = None # Token
self.value = None # ExpressionContext
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def With(self):
return self.getToken(QSharpParser.With, 0)
def ArrowLeft(self):
return self.getToken(QSharpParser.ArrowLeft, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUpdateExpression" ):
listener.enterUpdateExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUpdateExpression" ):
listener.exitUpdateExpression(self)
class CallExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def ParenLeft(self):
return self.getToken(QSharpParser.ParenLeft, 0)
def ParenRight(self):
return self.getToken(QSharpParser.ParenRight, 0)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallExpression" ):
listener.enterCallExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallExpression" ):
listener.exitCallExpression(self)
class BitwiseOrExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def TriplePipe(self):
return self.getToken(QSharpParser.TriplePipe, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBitwiseOrExpression" ):
listener.enterBitwiseOrExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBitwiseOrExpression" ):
listener.exitBitwiseOrExpression(self)
class LeftOpenRangeExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def Ellipsis(self):
return self.getToken(QSharpParser.Ellipsis, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLeftOpenRangeExpression" ):
listener.enterLeftOpenRangeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLeftOpenRangeExpression" ):
listener.exitLeftOpenRangeExpression(self)
class ExponentExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Caret(self):
return self.getToken(QSharpParser.Caret, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExponentExpression" ):
listener.enterExponentExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExponentExpression" ):
listener.exitExponentExpression(self)
class MultiplyExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Asterisk(self):
return self.getToken(QSharpParser.Asterisk, 0)
def Slash(self):
return self.getToken(QSharpParser.Slash, 0)
def Percent(self):
return self.getToken(QSharpParser.Percent, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMultiplyExpression" ):
listener.enterMultiplyExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMultiplyExpression" ):
listener.exitMultiplyExpression(self)
class ConditionalExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Question(self):
return self.getToken(QSharpParser.Question, 0)
def Pipe(self):
return self.getToken(QSharpParser.Pipe, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterConditionalExpression" ):
listener.enterConditionalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitConditionalExpression" ):
listener.exitConditionalExpression(self)
class UnwrapExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Bang(self):
return self.getToken(QSharpParser.Bang, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnwrapExpression" ):
listener.enterUnwrapExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnwrapExpression" ):
listener.exitUnwrapExpression(self)
class ControlledExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def ControlledFunctor(self):
return self.getToken(QSharpParser.ControlledFunctor, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterControlledExpression" ):
listener.enterControlledExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitControlledExpression" ):
listener.exitControlledExpression(self)
class IntegerExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def IntegerLiteral(self):
return self.getToken(QSharpParser.IntegerLiteral, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntegerExpression" ):
listener.enterIntegerExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntegerExpression" ):
listener.exitIntegerExpression(self)
class ShiftExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def TripleGreater(self):
return self.getToken(QSharpParser.TripleGreater, 0)
def TripleLess(self):
return self.getToken(QSharpParser.TripleLess, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShiftExpression" ):
listener.enterShiftExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShiftExpression" ):
listener.exitShiftExpression(self)
class IdentifierExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.name = None # QualifiedNameContext
self.copyFrom(ctx)
def qualifiedName(self):
return self.getTypedRuleContext(QSharpParser.QualifiedNameContext,0)
def Less(self):
return self.getToken(QSharpParser.Less, 0)
def Greater(self):
return self.getToken(QSharpParser.Greater, 0)
def type_rule(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.Type_ruleContext)
else:
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdentifierExpression" ):
listener.enterIdentifierExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdentifierExpression" ):
listener.exitIdentifierExpression(self)
class PauliExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def pauliLiteral(self):
return self.getTypedRuleContext(QSharpParser.PauliLiteralContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPauliExpression" ):
listener.enterPauliExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPauliExpression" ):
listener.exitPauliExpression(self)
class BitwiseAndExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def TripleAmpersand(self):
return self.getToken(QSharpParser.TripleAmpersand, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBitwiseAndExpression" ):
listener.enterBitwiseAndExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBitwiseAndExpression" ):
listener.exitBitwiseAndExpression(self)
class AddExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.left = None # ExpressionContext
self.operator = None # Token
self.right = None # ExpressionContext
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Plus(self):
return self.getToken(QSharpParser.Plus, 0)
def Minus(self):
return self.getToken(QSharpParser.Minus, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAddExpression" ):
listener.enterAddExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAddExpression" ):
listener.exitAddExpression(self)
class StringExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def DoubleQuote(self):
return self.getToken(QSharpParser.DoubleQuote, 0)
def StringDoubleQuote(self):
return self.getToken(QSharpParser.StringDoubleQuote, 0)
def stringContent(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.StringContentContext)
else:
return self.getTypedRuleContext(QSharpParser.StringContentContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringExpression" ):
listener.enterStringExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringExpression" ):
listener.exitStringExpression(self)
class RightOpenRangeExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def Ellipsis(self):
return self.getToken(QSharpParser.Ellipsis, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRightOpenRangeExpression" ):
listener.enterRightOpenRangeExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRightOpenRangeExpression" ):
listener.exitRightOpenRangeExpression(self)
class ArrayExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def BracketLeft(self):
return self.getToken(QSharpParser.BracketLeft, 0)
def BracketRight(self):
return self.getToken(QSharpParser.BracketRight, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def Comma(self, i:int=None):
if i is None:
return self.getTokens(QSharpParser.Comma)
else:
return self.getToken(QSharpParser.Comma, i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArrayExpression" ):
listener.enterArrayExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArrayExpression" ):
listener.exitArrayExpression(self)
class MissingExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def Underscore(self):
return self.getToken(QSharpParser.Underscore, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMissingExpression" ):
listener.enterMissingExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMissingExpression" ):
listener.exitMissingExpression(self)
class BigIntegerExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def BigIntegerLiteral(self):
return self.getToken(QSharpParser.BigIntegerLiteral, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBigIntegerExpression" ):
listener.enterBigIntegerExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBigIntegerExpression" ):
listener.exitBigIntegerExpression(self)
class NewArrayExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def New(self):
return self.getToken(QSharpParser.New, 0)
def type_rule(self):
return self.getTypedRuleContext(QSharpParser.Type_ruleContext,0)
def BracketLeft(self):
return self.getToken(QSharpParser.BracketLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def BracketRight(self):
return self.getToken(QSharpParser.BracketRight, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNewArrayExpression" ):
listener.enterNewArrayExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNewArrayExpression" ):
listener.exitNewArrayExpression(self)
class AdjointExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def AdjointFunctor(self):
return self.getToken(QSharpParser.AdjointFunctor, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAdjointExpression" ):
listener.enterAdjointExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAdjointExpression" ):
listener.exitAdjointExpression(self)
class EqualsExpressionContext(ExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a QSharpParser.ExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(QSharpParser.ExpressionContext)
else:
return self.getTypedRuleContext(QSharpParser.ExpressionContext,i)
def DoubleEqual(self):
return self.getToken(QSharpParser.DoubleEqual, 0)
def NotEqual(self):
return self.getToken(QSharpParser.NotEqual, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEqualsExpression" ):
listener.enterEqualsExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEqualsExpression" ):
listener.exitEqualsExpression(self)
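# Rule method for the left-recursive 'expression' rule: enterRecursionRule is entered
# with precedence argument _p. This first dispatch handles the primary (non-left-recursive)
# alternatives such as '_', qualified names with optional type arguments, integer/big
# integer/double literals, plain and interpolated strings, bool/result/Pauli literals,
# tuples, and arrays; binary and postfix forms are handled by the recursion that follows.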
def expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = QSharpParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 68
self.enterRecursionRule(localctx, 68, self.RULE_expression, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 594
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,59,self._ctx)
if la_ == 1:
localctx = QSharpParser.MissingExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 508
self.match(QSharpParser.Underscore)
pass
elif la_ == 2:
localctx = QSharpParser.IdentifierExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 509
localctx.name = self.qualifiedName()
self.state = 525
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,50,self._ctx)
if la_ == 1:
self.state = 510
self.match(QSharpParser.Less)
self.state = 522
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.BigInt) | (1 << QSharpParser.Bool) | (1 << QSharpParser.Double) | (1 << QSharpParser.Int) | (1 << QSharpParser.Pauli) | (1 << QSharpParser.Qubit) | (1 << QSharpParser.Range) | (1 << QSharpParser.Result) | (1 << QSharpParser.String) | (1 << QSharpParser.Unit))) != 0) or ((((_la - 93)) & ~0x3f) == 0 and ((1 << (_la - 93)) & ((1 << (QSharpParser.ParenLeft - 93)) | (1 << (QSharpParser.Underscore - 93)) | (1 << (QSharpParser.Identifier - 93)) | (1 << (QSharpParser.TypeParameter - 93)))) != 0):
self.state = 511
self.type_rule(0)
self.state = 516
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,47,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 512
self.match(QSharpParser.Comma)
self.state = 513
self.type_rule(0)
self.state = 518
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,47,self._ctx)
self.state = 520
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 519
self.match(QSharpParser.Comma)
self.state = 524
self.match(QSharpParser.Greater)
pass
elif la_ == 3:
localctx = QSharpParser.IntegerExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 527
localctx.value = self.match(QSharpParser.IntegerLiteral)
pass
elif la_ == 4:
localctx = QSharpParser.BigIntegerExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 528
self.match(QSharpParser.BigIntegerLiteral)
pass
elif la_ == 5:
localctx = QSharpParser.DoubleExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 529
self.match(QSharpParser.DoubleLiteral)
pass
elif la_ == 6:
localctx = QSharpParser.StringExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 530
self.match(QSharpParser.DoubleQuote)
self.state = 534
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==QSharpParser.StringEscape or _la==QSharpParser.StringText:
self.state = 531
self.stringContent()
self.state = 536
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 537
self.match(QSharpParser.StringDoubleQuote)
pass
elif la_ == 7:
localctx = QSharpParser.InterpStringExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 538
self.match(QSharpParser.DollarQuote)
self.state = 542
self._errHandler.sync(self)
_la = self._input.LA(1)
while ((((_la - 131)) & ~0x3f) == 0 and ((1 << (_la - 131)) & ((1 << (QSharpParser.InterpStringEscape - 131)) | (1 << (QSharpParser.InterpBraceLeft - 131)) | (1 << (QSharpParser.InterpStringText - 131)))) != 0):
self.state = 539
self.interpStringContent()
self.state = 544
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 545
self.match(QSharpParser.InterpDoubleQuote)
pass
elif la_ == 8:
localctx = QSharpParser.BoolExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 546
self.boolLiteral()
pass
elif la_ == 9:
localctx = QSharpParser.ResultExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 547
self.resultLiteral()
pass
elif la_ == 10:
localctx = QSharpParser.PauliExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 548
self.pauliLiteral()
pass
elif la_ == 11:
localctx = QSharpParser.TupleExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 549
localctx.openParen = self.match(QSharpParser.ParenLeft)
self.state = 561
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointFunctor) | (1 << QSharpParser.ControlledFunctor) | (1 << QSharpParser.BFalse) | (1 << QSharpParser.New) | (1 << QSharpParser.Not) | (1 << QSharpParser.One) | (1 << QSharpParser.PauliI) | (1 << QSharpParser.PauliX) | (1 << QSharpParser.PauliY) | (1 << QSharpParser.PauliZ) | (1 << QSharpParser.BTrue) | (1 << QSharpParser.Zero))) != 0) or ((((_la - 70)) & ~0x3f) == 0 and ((1 << (_la - 70)) & ((1 << (QSharpParser.BracketLeft - 70)) | (1 << (QSharpParser.DollarQuote - 70)) | (1 << (QSharpParser.DoubleQuote - 70)) | (1 << (QSharpParser.Ellipsis - 70)) | (1 << (QSharpParser.Minus - 70)) | (1 << (QSharpParser.ParenLeft - 70)) | (1 << (QSharpParser.TripleTilde - 70)) | (1 << (QSharpParser.Underscore - 70)) | (1 << (QSharpParser.IntegerLiteral - 70)) | (1 << (QSharpParser.BigIntegerLiteral - 70)) | (1 << (QSharpParser.DoubleLiteral - 70)) | (1 << (QSharpParser.Identifier - 70)))) != 0):
self.state = 550
localctx._expression = self.expression(0)
localctx.items.append(localctx._expression)
self.state = 555
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,53,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 551
localctx.s75 = self.match(QSharpParser.Comma)
localctx.commas.append(localctx.s75)
self.state = 552
localctx._expression = self.expression(0)
localctx.items.append(localctx._expression)
self.state = 557
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,53,self._ctx)
self.state = 559
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 558
localctx.s75 = self.match(QSharpParser.Comma)
localctx.commas.append(localctx.s75)
self.state = 563
localctx.closeParen = self.match(QSharpParser.ParenRight)
pass
elif la_ == 12:
localctx = QSharpParser.ArrayExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 564
self.match(QSharpParser.BracketLeft)
self.state = 576
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointFunctor) | (1 << QSharpParser.ControlledFunctor) | (1 << QSharpParser.BFalse) | (1 << QSharpParser.New) | (1 << QSharpParser.Not) | (1 << QSharpParser.One) | (1 << QSharpParser.PauliI) | (1 << QSharpParser.PauliX) | (1 << QSharpParser.PauliY) | (1 << QSharpParser.PauliZ) | (1 << QSharpParser.BTrue) | (1 << QSharpParser.Zero))) != 0) or ((((_la - 70)) & ~0x3f) == 0 and ((1 << (_la - 70)) & ((1 << (QSharpParser.BracketLeft - 70)) | (1 << (QSharpParser.DollarQuote - 70)) | (1 << (QSharpParser.DoubleQuote - 70)) | (1 << (QSharpParser.Ellipsis - 70)) | (1 << (QSharpParser.Minus - 70)) | (1 << (QSharpParser.ParenLeft - 70)) | (1 << (QSharpParser.TripleTilde - 70)) | (1 << (QSharpParser.Underscore - 70)) | (1 << (QSharpParser.IntegerLiteral - 70)) | (1 << (QSharpParser.BigIntegerLiteral - 70)) | (1 << (QSharpParser.DoubleLiteral - 70)) | (1 << (QSharpParser.Identifier - 70)))) != 0):
self.state = 565
self.expression(0)
self.state = 570
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,56,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 566
self.match(QSharpParser.Comma)
self.state = 567
self.expression(0)
self.state = 572
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,56,self._ctx)
self.state = 574
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 573
self.match(QSharpParser.Comma)
self.state = 578
self.match(QSharpParser.BracketRight)
pass
elif la_ == 13:
localctx = QSharpParser.NewArrayExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 579
self.match(QSharpParser.New)
self.state = 580
self.type_rule(0)
self.state = 581
self.match(QSharpParser.BracketLeft)
self.state = 582
self.expression(0)
self.state = 583
self.match(QSharpParser.BracketRight)
pass
elif la_ == 14:
localctx = QSharpParser.ControlledExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 585
self.match(QSharpParser.ControlledFunctor)
self.state = 586
self.expression(21)
pass
elif la_ == 15:
localctx = QSharpParser.AdjointExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 587
self.match(QSharpParser.AdjointFunctor)
self.state = 588
self.expression(20)
pass
elif la_ == 16:
localctx = QSharpParser.NegationExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 589
_la = self._input.LA(1)
if not(_la==QSharpParser.Not or _la==QSharpParser.Minus or _la==QSharpParser.TripleTilde):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 590
self.expression(18)
pass
elif la_ == 17:
localctx = QSharpParser.LeftOpenRangeExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 591
self.match(QSharpParser.Ellipsis)
self.state = 592
self.expression(3)
pass
elif la_ == 18:
localctx = QSharpParser.OpenRangeExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 593
self.match(QSharpParser.Ellipsis)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 675
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 673
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,64,self._ctx)
if la_ == 1:
localctx = QSharpParser.ExponentExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 596
if not self.precpred(self._ctx, 17):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 17)")
self.state = 597
self.match(QSharpParser.Caret)
self.state = 598
self.expression(17)
pass
elif la_ == 2:
localctx = QSharpParser.MultiplyExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 599
if not self.precpred(self._ctx, 16):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 16)")
self.state = 600
_la = self._input.LA(1)
if not(((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (QSharpParser.Asterisk - 64)) | (1 << (QSharpParser.Percent - 64)) | (1 << (QSharpParser.Slash - 64)))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 601
self.expression(17)
pass
elif la_ == 3:
localctx = QSharpParser.AddExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 602
if not self.precpred(self._ctx, 15):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 15)")
self.state = 603
localctx.operator = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==QSharpParser.Minus or _la==QSharpParser.Plus):
localctx.operator = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 604
localctx.right = self.expression(16)
pass
elif la_ == 4:
localctx = QSharpParser.ShiftExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 605
if not self.precpred(self._ctx, 14):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 14)")
self.state = 606
_la = self._input.LA(1)
if not(_la==QSharpParser.TripleGreater or _la==QSharpParser.TripleLess):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 607
self.expression(15)
pass
elif la_ == 5:
localctx = QSharpParser.CompareExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 608
if not self.precpred(self._ctx, 13):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 13)")
self.state = 609
_la = self._input.LA(1)
if not(((((_la - 85)) & ~0x3f) == 0 and ((1 << (_la - 85)) & ((1 << (QSharpParser.Greater - 85)) | (1 << (QSharpParser.GreaterEqual - 85)) | (1 << (QSharpParser.Less - 85)) | (1 << (QSharpParser.LessEqual - 85)))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 610
self.expression(14)
pass
elif la_ == 6:
localctx = QSharpParser.EqualsExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 611
if not self.precpred(self._ctx, 12):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 12)")
self.state = 612
_la = self._input.LA(1)
if not(_la==QSharpParser.DoubleEqual or _la==QSharpParser.NotEqual):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 613
self.expression(13)
pass
elif la_ == 7:
localctx = QSharpParser.BitwiseAndExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 614
if not self.precpred(self._ctx, 11):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 11)")
self.state = 615
self.match(QSharpParser.TripleAmpersand)
self.state = 616
self.expression(12)
pass
elif la_ == 8:
localctx = QSharpParser.BitwiseXorExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 617
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 10)")
self.state = 618
self.match(QSharpParser.TripleCaret)
self.state = 619
self.expression(11)
pass
elif la_ == 9:
localctx = QSharpParser.BitwiseOrExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 620
if not self.precpred(self._ctx, 9):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 9)")
self.state = 621
self.match(QSharpParser.TriplePipe)
self.state = 622
self.expression(10)
pass
elif la_ == 10:
localctx = QSharpParser.AndExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 623
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 8)")
self.state = 624
self.match(QSharpParser.And)
self.state = 625
self.expression(9)
pass
elif la_ == 11:
localctx = QSharpParser.OrExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 626
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 627
self.match(QSharpParser.Or)
self.state = 628
self.expression(8)
pass
elif la_ == 12:
localctx = QSharpParser.ConditionalExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 629
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 630
self.match(QSharpParser.Question)
self.state = 631
self.expression(0)
self.state = 632
self.match(QSharpParser.Pipe)
self.state = 633
self.expression(6)
pass
elif la_ == 13:
localctx = QSharpParser.RangeExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 635
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 636
self.match(QSharpParser.DoubleDot)
self.state = 637
self.expression(6)
pass
elif la_ == 14:
localctx = QSharpParser.UpdateExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
localctx.record = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 638
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 639
localctx.with_slash = self.match(QSharpParser.With)
self.state = 640
localctx.item = self.expression(0)
self.state = 641
localctx.arrow = self.match(QSharpParser.ArrowLeft)
self.state = 642
localctx.value = self.expression(2)
pass
elif la_ == 15:
localctx = QSharpParser.ItemAccessExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 644
if not self.precpred(self._ctx, 23):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 23)")
self.state = 651
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.DoubleColon]:
self.state = 645
self.match(QSharpParser.DoubleColon)
self.state = 646
self.match(QSharpParser.Identifier)
pass
elif token in [QSharpParser.BracketLeft]:
self.state = 647
self.match(QSharpParser.BracketLeft)
self.state = 648
self.expression(0)
self.state = 649
self.match(QSharpParser.BracketRight)
pass
else:
raise NoViableAltException(self)
pass
elif la_ == 16:
localctx = QSharpParser.UnwrapExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 653
if not self.precpred(self._ctx, 22):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 22)")
self.state = 654
self.match(QSharpParser.Bang)
pass
elif la_ == 17:
localctx = QSharpParser.CallExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 655
if not self.precpred(self._ctx, 19):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 19)")
self.state = 656
self.match(QSharpParser.ParenLeft)
self.state = 668
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.AdjointFunctor) | (1 << QSharpParser.ControlledFunctor) | (1 << QSharpParser.BFalse) | (1 << QSharpParser.New) | (1 << QSharpParser.Not) | (1 << QSharpParser.One) | (1 << QSharpParser.PauliI) | (1 << QSharpParser.PauliX) | (1 << QSharpParser.PauliY) | (1 << QSharpParser.PauliZ) | (1 << QSharpParser.BTrue) | (1 << QSharpParser.Zero))) != 0) or ((((_la - 70)) & ~0x3f) == 0 and ((1 << (_la - 70)) & ((1 << (QSharpParser.BracketLeft - 70)) | (1 << (QSharpParser.DollarQuote - 70)) | (1 << (QSharpParser.DoubleQuote - 70)) | (1 << (QSharpParser.Ellipsis - 70)) | (1 << (QSharpParser.Minus - 70)) | (1 << (QSharpParser.ParenLeft - 70)) | (1 << (QSharpParser.TripleTilde - 70)) | (1 << (QSharpParser.Underscore - 70)) | (1 << (QSharpParser.IntegerLiteral - 70)) | (1 << (QSharpParser.BigIntegerLiteral - 70)) | (1 << (QSharpParser.DoubleLiteral - 70)) | (1 << (QSharpParser.Identifier - 70)))) != 0):
self.state = 657
self.expression(0)
self.state = 662
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,61,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 658
self.match(QSharpParser.Comma)
self.state = 659
self.expression(0)
self.state = 664
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,61,self._ctx)
self.state = 666
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==QSharpParser.Comma:
self.state = 665
self.match(QSharpParser.Comma)
self.state = 670
self.match(QSharpParser.ParenRight)
pass
elif la_ == 18:
localctx = QSharpParser.RightOpenRangeExpressionContext(self, QSharpParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 671
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 672
self.match(QSharpParser.Ellipsis)
pass
self.state = 677
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,65,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class BoolLiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def BFalse(self):
return self.getToken(QSharpParser.BFalse, 0)
def BTrue(self):
return self.getToken(QSharpParser.BTrue, 0)
def getRuleIndex(self):
return QSharpParser.RULE_boolLiteral
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBoolLiteral" ):
listener.enterBoolLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBoolLiteral" ):
listener.exitBoolLiteral(self)
def boolLiteral(self):
localctx = QSharpParser.BoolLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 70, self.RULE_boolLiteral)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 678
_la = self._input.LA(1)
if not(_la==QSharpParser.BFalse or _la==QSharpParser.BTrue):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ResultLiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def Zero(self):
return self.getToken(QSharpParser.Zero, 0)
def One(self):
return self.getToken(QSharpParser.One, 0)
def getRuleIndex(self):
return QSharpParser.RULE_resultLiteral
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterResultLiteral" ):
listener.enterResultLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitResultLiteral" ):
listener.exitResultLiteral(self)
def resultLiteral(self):
localctx = QSharpParser.ResultLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 72, self.RULE_resultLiteral)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 680
_la = self._input.LA(1)
if not(_la==QSharpParser.One or _la==QSharpParser.Zero):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PauliLiteralContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def PauliI(self):
return self.getToken(QSharpParser.PauliI, 0)
def PauliX(self):
return self.getToken(QSharpParser.PauliX, 0)
def PauliY(self):
return self.getToken(QSharpParser.PauliY, 0)
def PauliZ(self):
return self.getToken(QSharpParser.PauliZ, 0)
def getRuleIndex(self):
return QSharpParser.RULE_pauliLiteral
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPauliLiteral" ):
listener.enterPauliLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPauliLiteral" ):
listener.exitPauliLiteral(self)
def pauliLiteral(self):
localctx = QSharpParser.PauliLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 74, self.RULE_pauliLiteral)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 682
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << QSharpParser.PauliI) | (1 << QSharpParser.PauliX) | (1 << QSharpParser.PauliY) | (1 << QSharpParser.PauliZ))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StringContentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def StringEscape(self):
return self.getToken(QSharpParser.StringEscape, 0)
def StringText(self):
return self.getToken(QSharpParser.StringText, 0)
def getRuleIndex(self):
return QSharpParser.RULE_stringContent
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringContent" ):
listener.enterStringContent(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringContent" ):
listener.exitStringContent(self)
def stringContent(self):
localctx = QSharpParser.StringContentContext(self, self._ctx, self.state)
self.enterRule(localctx, 76, self.RULE_stringContent)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 684
_la = self._input.LA(1)
if not(_la==QSharpParser.StringEscape or _la==QSharpParser.StringText):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class InterpStringContentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def InterpStringEscape(self):
return self.getToken(QSharpParser.InterpStringEscape, 0)
def InterpBraceLeft(self):
return self.getToken(QSharpParser.InterpBraceLeft, 0)
def expression(self):
return self.getTypedRuleContext(QSharpParser.ExpressionContext,0)
def BraceRight(self):
return self.getToken(QSharpParser.BraceRight, 0)
def InterpStringText(self):
return self.getToken(QSharpParser.InterpStringText, 0)
def getRuleIndex(self):
return QSharpParser.RULE_interpStringContent
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInterpStringContent" ):
listener.enterInterpStringContent(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInterpStringContent" ):
listener.exitInterpStringContent(self)
def interpStringContent(self):
localctx = QSharpParser.InterpStringContentContext(self, self._ctx, self.state)
self.enterRule(localctx, 78, self.RULE_interpStringContent)
try:
self.state = 692
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [QSharpParser.InterpStringEscape]:
self.enterOuterAlt(localctx, 1)
self.state = 686
self.match(QSharpParser.InterpStringEscape)
pass
elif token in [QSharpParser.InterpBraceLeft]:
self.enterOuterAlt(localctx, 2)
self.state = 687
self.match(QSharpParser.InterpBraceLeft)
self.state = 688
self.expression(0)
self.state = 689
self.match(QSharpParser.BraceRight)
pass
elif token in [QSharpParser.InterpStringText]:
self.enterOuterAlt(localctx, 3)
self.state = 691
self.match(QSharpParser.InterpStringText)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[19] = self.characteristicsExpression_sempred
self._predicates[27] = self.type_rule_sempred
self._predicates[34] = self.expression_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def characteristicsExpression_sempred(self, localctx:CharacteristicsExpressionContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 2)
if predIndex == 1:
return self.precpred(self._ctx, 1)
def type_rule_sempred(self, localctx:Type_ruleContext, predIndex:int):
if predIndex == 2:
return self.precpred(self._ctx, 1)
def expression_sempred(self, localctx:ExpressionContext, predIndex:int):
if predIndex == 3:
return self.precpred(self._ctx, 17)
if predIndex == 4:
return self.precpred(self._ctx, 16)
if predIndex == 5:
return self.precpred(self._ctx, 15)
if predIndex == 6:
return self.precpred(self._ctx, 14)
if predIndex == 7:
return self.precpred(self._ctx, 13)
if predIndex == 8:
return self.precpred(self._ctx, 12)
if predIndex == 9:
return self.precpred(self._ctx, 11)
if predIndex == 10:
return self.precpred(self._ctx, 10)
if predIndex == 11:
return self.precpred(self._ctx, 9)
if predIndex == 12:
return self.precpred(self._ctx, 8)
if predIndex == 13:
return self.precpred(self._ctx, 7)
if predIndex == 14:
return self.precpred(self._ctx, 6)
if predIndex == 15:
return self.precpred(self._ctx, 5)
if predIndex == 16:
return self.precpred(self._ctx, 1)
if predIndex == 17:
return self.precpred(self._ctx, 23)
if predIndex == 18:
return self.precpred(self._ctx, 22)
if predIndex == 19:
return self.precpred(self._ctx, 19)
if predIndex == 20:
return self.precpred(self._ctx, 4)
| 40.019822
| 1,419
| 0.58037
|
f693e9c0c4f7bec988dd5a86415ebc79458b6ea0
| 994
|
py
|
Python
|
kubernetes_asyncio/test/test_v1beta1_cron_job_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_v1beta1_cron_job_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_v1beta1_cron_job_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1beta1_cron_job_spec import V1beta1CronJobSpec # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1beta1CronJobSpec(unittest.TestCase):
"""V1beta1CronJobSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CronJobSpec(self):
"""Test V1beta1CronJobSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1beta1_cron_job_spec.V1beta1CronJobSpec() # noqa: E501
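        # Illustrative sketch only (not part of the original stub): a CronJob spec
        # requires at least `schedule` and `job_template`; the exact constructor
        # kwargs below are assumptions about this generated client version.
        # model = kubernetes_asyncio.client.V1beta1CronJobSpec(
        #     schedule="*/5 * * * *",
        #     job_template=kubernetes_asyncio.client.V1beta1JobTemplateSpec())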
pass
if __name__ == '__main__':
unittest.main()
| 24.85
| 124
| 0.7334
|
d47c913f8c6674157d2a4dc4bd3a9fa4ca94ba46
| 4,726
|
py
|
Python
|
bin/plans_cli.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | 1
|
2020-11-25T06:56:43.000Z
|
2020-11-25T06:56:43.000Z
|
bin/plans_cli.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | null | null | null |
bin/plans_cli.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | null | null | null |
"""
Fred Shone
20th Nov 2018
Command line script for converting TFL csv simulated population data into Matsim xml format
- Sampling
- Activity inference
- Mode conversion
- Activity inference validation
- Write to xml format for MATSim
- Write to csv format for viz
"""
import argparse
import os
from mimi.core import samplers, output
from mimi.lopops import lopops
from input.config import LoPopSConfig
def get_args():
"""
Gets command line args. Includes a number of defaults for input and output paths.
:return: argparse arguments object
"""
population_input = os.path.join('data', 'plans',
'TravelPlans.csv')
geo_input = os.path.join('data', 'plans',
'zoneSystem',
'ABM_Zones_002_LSOA.shp')
parser = argparse.ArgumentParser()
parser.add_argument('--out', '-O',
default='outputs',
type=str,
help="Output population path"
)
parser.add_argument('--name', '-N',
default='pop.xml',
type=str,
help="Output population name (.xml)"
)
parser.add_argument('--prefix',
default='',
type=str,
help="Outputs prefix"
)
parser.add_argument('--sample',
'-S',
default=10.,
type=float,
help="% sampling as float from >0 to 100 (default = 10%)"
)
parser.add_argument('--no_freq',
'-NF',
action='store_true',
help="Sample to approx. 2.5% by setting plan weightings to 1"
)
parser.add_argument('--limit', '-L',
default=0,
type=int,
help='set plan limit, eg 1000 plans, default 0 denotes no limit'
)
parser.add_argument('--keep_dummies', '-KD',
action='store_true',
help='keep dummies from input plans'
)
parser.add_argument('--epsg',
                        default=27700,  # MATSim docs recommend a local coordinate system over a global one, e.g. 4326 (~WGS84)
type=int,
help="Input required crs (default: 27700 (UK Grid))"
)
parser.add_argument('--input',
default=population_input,
type=str,
help="Input string population path (default: {})".format(population_input)
)
parser.add_argument('--zones',
default=geo_input,
type=str,
help="Input string areas shapes path (default: {})".format(geo_input)
)
parser.add_argument('--verbose', '-V',
action='store_true'
)
parser.add_argument('--all_cars', '-AC',
action='store_true'
)
parser.add_argument('--force_home', '-FH',
action='store_true'
)
parser.add_argument('--seed',
default=1234,
type=int
)
arguments = parser.parse_args()
assert arguments.sample <= 100
return arguments
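# Hedged example invocation (paths and flag values below are illustrative, not from
# the original script):
#   python bin/plans_cli.py --input data/plans/TravelPlans.csv --sample 5 \
#       --name pop_5pct.xml --out outputs --verbose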
if __name__ == '__main__':
args = get_args()
config = LoPopSConfig
config.VERBOSE = args.verbose
config.OUTPATH = args.out
config.XMLNAME = args.name
config.XMLPATH = os.path.join(config.OUTPATH, config.XMLNAME)
config.SAMPLE = args.sample
config.EPSG = args.epsg
config.SEED = args.seed
config.PREFIX = args.prefix
config.INPUTPATH = args.input
config.ZONESPATH = args.zones
config.LIMIT = args.limit
config.NOFREQ = args.no_freq
config.NORM = None
config.DUMMIES = args.keep_dummies
config.ALLCARS = args.all_cars
config.FORCEHOME = args.force_home
plans = lopops.Data(config) # Load raw plans and prepare
sampler = samplers.ObjectSampler(config)
population = samplers.make_pop(plans, sampler) # sample from synth
population.make_records(config)
output.write_xml_plans(population, config) # write
output.write_xml_attributes(population, config) # write
tables = output.Tables(config, population)
tables.write(config.PREFIX)
tables.describe(config.PREFIX)
| 34.246377
| 117
| 0.516081
|
43d9df133792652927d204127ab784eee427f9e9
| 2,147
|
py
|
Python
|
forceDAQ/data_handling/read_force_data.py
|
raunaqbhirangi/pyForceDAQ
|
a2a41cd7a4a4f0afd178bc5555ba4e0540902d30
|
[
"MIT"
] | 8
|
2016-06-27T12:07:14.000Z
|
2022-03-29T08:59:44.000Z
|
forceDAQ/data_handling/read_force_data.py
|
raunaqbhirangi/pyForceDAQ
|
a2a41cd7a4a4f0afd178bc5555ba4e0540902d30
|
[
"MIT"
] | 1
|
2020-01-15T20:29:53.000Z
|
2020-07-31T17:35:34.000Z
|
forceDAQ/data_handling/read_force_data.py
|
raunaqbhirangi/pyForceDAQ
|
a2a41cd7a4a4f0afd178bc5555ba4e0540902d30
|
[
"MIT"
] | 3
|
2020-01-14T18:31:39.000Z
|
2022-03-25T05:56:40.000Z
|
"""
Functions to read your force and event data
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
from collections import OrderedDict
import numpy as np
TAG_COMMENTS = "#"
TAG_UDPDATA = TAG_COMMENTS + "UDP"
TAG_DAQEVENTS = TAG_COMMENTS + "T"
def _csv(line):
return list(map(lambda x: x.strip(), line.split(",")))
def DataFrameDict(data, varnames):
"""data frame: Dict of numpy arrays
does not require Pandas, but can be easily converted to pandas dataframe
via pandas.DataFrame(data_frame_dict)
"""
rtn = OrderedDict()
for v in varnames:
rtn[v] = []
for row in data:
for v, d in zip(varnames, row):
rtn[v].append(d)
return rtn
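# Hedged usage sketch for DataFrameDict (values are illustrative):
#   DataFrameDict([["0.1", "128"], ["0.2", "131"]], ["time", "Fx"])
#   -> OrderedDict([('time', ['0.1', '0.2']), ('Fx', ['128', '131'])])
# If pandas is available, pandas.DataFrame(...) on the result yields a data frame.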
def data_frame_to_text(data_frame):
rtn = ",".join(data_frame.keys())
rtn += "\n"
for x in np.array(list(data_frame.values())).T:
rtn += ",".join(x) + "\n"
return rtn
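# Hedged example of the text produced above: a header row of variable names followed
# by one comma-separated line per record, e.g. "time,Fx\n0.1,128\n0.2,131\n".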
def read_raw_data(path):
"""reading trigger and udp data
Returns: data, udp_event, daq_events and comments
data, udp_event, daq_events: DataFrameDict
comments: text string
"""
daq_events = []
udp_events = []
comments = ""
data = []
varnames = None
app_dir = os.path.split(sys.argv[0])[0]
path = os.path.abspath(os.path.join(app_dir, path))
if path.endswith("gz"):
fl = gzip.open(path, "rt")
else:
fl = open(path, "rt")
for ln in fl:
if ln.startswith(TAG_COMMENTS):
comments += ln
if ln.startswith(TAG_UDPDATA + ","):
udp_events.append(_csv(ln[len(TAG_UDPDATA) + 1:]))
elif ln.startswith(TAG_DAQEVENTS):
daq_events.append(_csv(ln[len(TAG_DAQEVENTS) + 1:]))
else:
# data
if varnames is None:
# first row contains varnames
varnames = _csv(ln)
else:
data.append(_csv(ln))
fl.close()
return (DataFrameDict(data, varnames),
DataFrameDict(udp_events, ["time", "value"]),
DataFrameDict(daq_events, ["time", "value"]),
comments)
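# Hedged usage sketch (the file name and column names are illustrative; actual force
# data columns come from the header row of the recording):
#   force_data, udp_events, daq_events, comments = read_raw_data("session1.csv.gz")
#   udp_events["value"]  # payloads of lines tagged "#UDP,"
#   daq_events["value"]  # payloads of lines tagged "#T"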
| 23.855556
| 76
| 0.581276
|
dfc5ecbb4c8444bb13e33e2ebe88f2e9d56fb6df
| 549
|
py
|
Python
|
semisup/tools/data_dirs.py
|
hansroh/learning_by_association
|
21878a84baf3cc89fcc32e7cc591038d231bd00e
|
[
"Apache-2.0"
] | null | null | null |
semisup/tools/data_dirs.py
|
hansroh/learning_by_association
|
21878a84baf3cc89fcc32e7cc591038d231bd00e
|
[
"Apache-2.0"
] | null | null | null |
semisup/tools/data_dirs.py
|
hansroh/learning_by_association
|
21878a84baf3cc89fcc32e7cc591038d231bd00e
|
[
"Apache-2.0"
] | null | null | null |
"""
This file contains the directory paths to data sets on your hard drive.
Change the directories and .gitignore this file.
"""
synth = '/work/haeusser/data/synth/'
stl10 = '/work/haeusser/data/stl10_binary/'
svhn = '/work/haeusser/data/svhn/'
mnist = 'mnidata'
imagenet = '/work/haeusser/data/imagenet/raw-data/'
imagenet_labels = '/usr/wiss/haeusser/libs/tfmodels/inception/inception/data/imagenet_lsvrc_2015_synsets.txt'
gtsrb = '/work/haeusser/data/gtsrb/'
usps = '/work/haeusser/data/usps/'
office = '/work/haeusser/data/office/'
mnistm = ''
| 36.6
| 109
| 0.746812
|
671ddbf8709248d5f877244309194a7ef9e2cc38
| 21,089
|
py
|
Python
|
scripts/pretraining/torch/bert/run_pretraining.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 2,461
|
2018-04-25T03:47:22.000Z
|
2022-03-31T03:58:48.000Z
|
scripts/pretraining/torch/bert/run_pretraining.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 1,450
|
2018-04-25T16:14:25.000Z
|
2022-02-24T21:02:57.000Z
|
scripts/pretraining/torch/bert/run_pretraining.py
|
leezu/gluon-nlp
|
19de74c2b03f22dde8311a0225b4571c2deef0e4
|
[
"Apache-2.0"
] | 578
|
2018-04-25T04:55:18.000Z
|
2022-03-16T03:01:45.000Z
|
"""Pretraining on Code"""
import argparse
import functools
import json
import logging
import os
import pathlib
import random
import shutil
import time
import warnings
from contextlib import suppress
import gluonnlp as nlp
import numpy as np
import pyarrow as pa
import pyarrow.compute
import pyarrow.dataset
import torch as th
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim.grad_scaler import ShardedGradScaler
from fairscale.optim.oss import OSS
from torch import distributed
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
def repeat(iterable, count=None, *, set_epoch=False):
if count is None:
i = 0
while True:
if set_epoch:
iterable.sampler.set_epoch(i)
for sample in iterable:
yield sample
i += 1
else:
for i in range(count):
if set_epoch:
iterable.sampler.set_epoch(i)
for sample in iterable:
yield sample
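# Hedged illustration of `repeat`: it cycles an iterable indefinitely (or `count`
# times); with set_epoch=True it calls sampler.set_epoch(i) on each pass so a
# DistributedSampler reshuffles per epoch, e.g.
#   train_iter = repeat(train_dataloader, set_epoch=True)
#   batch = next(train_iter)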
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__)
# Input / output
group = parser.add_argument_group('Input / output')
group.add_argument('--input-files', type=pathlib.Path, nargs='+')
group.add_argument(
'--mmap-folder', type=pathlib.Path, default='/dev/shm/gluonnlp',
        help='Folder to place mmap files for sharing the dataset across local worker processes.')
group.add_argument('--lang', type=str, choices=['python'])
group.add_argument('--ckpt_dir', type=str, default='./ckpt_dir',
help='Path to checkpoint directory')
group.add_argument('--log_interval', type=int, default=50, help='Report interval')
group.add_argument('--ckpt_interval', type=int, default=25000, help='Checkpoint interval')
group.add_argument('--verbose', action='store_true', help='verbose logging')
# model
group = parser.add_argument_group('Model')
group.add_argument('--model_name', type=str, default='coder_base',
choices=nlp.models.bert.list_pretrained_bert(),
help='Name of the model configuration.')
# training
group = parser.add_argument_group('Training')
group.add_argument('--seed', type=int, default=100, help='Random seed')
group.add_argument('--batch_size', type=int, default=8,
help='Batch size per GPU in a minibatch.')
group.add_argument(
'--num_accumulated', type=int, default=1,
help='Number of batches for gradient accumulation. '
'total_batch_size = batch_size_per_worker * num_worker * accumulate.')
group.add_argument('--num_steps', type=int, default=20, help='Number of optimization steps')
group.add_argument('--start_step', type=int, default=0,
help='Start optimization step from the checkpoint.')
group.add_argument('--lr', type=float, default=0.005, help='Learning rate')
group.add_argument('--weight_decay', type=float, default=0.01, help='weight decay')
group.add_argument('--max_grad_norm', type=float, default=1.0, help='Max gradient norm.')
group.add_argument('--warmup_ratio', type=float, default=0.05,
help='Ratio of warmup steps in the learning rate scheduler.')
group.add_argument('--const_ratio', type=float, default=0.25,
help='Ratio of constant steps in the learning rate scheduler.')
group.add_argument('--num_dataloader_workers', type=int, default=4,
help='Number of workers to pre-process dataset.')
# phase 2
parser.add_argument('--phase2', action='store_true', help='phase 2 training')
parser.add_argument('--phase1_num_steps', type=int, help='number of steps for phase 1')
# computation and communication
group = parser.add_argument_group('Computation and communication')
group.add_argument("--local_rank", type=int, default=-1, help="Rank in distributed training")
group.add_argument("--fp16", action=nlp.utils.misc.BooleanOptionalAction, default=True,
help="Whether to use 16-bit (mixed) precision instead of 32-bit.")
parser.add_argument(
"--ZeRO", action=nlp.utils.misc.BooleanOptionalAction, default=False,
help="Use ZeRO parameter and optimizer state sharding. "
"Helps speed-up and reduce memory usage of large models.")
group.add_argument("--cuda", action=nlp.utils.misc.BooleanOptionalAction, default=True,
help="Use Cuda if available.")
group.add_argument("--activation-checkpointing", action=nlp.utils.misc.BooleanOptionalAction,
default=False, help="Trade compute for memory by checkpointing activations.")
args = parser.parse_args()
# Yet to be supported settings
assert not args.activation_checkpointing # TODO
return args
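# Hedged launch sketch (flag values are assumptions): the script reads --local_rank,
# which matches torch.distributed.launch-style single-node multi-GPU runs, e.g.
#   python -m torch.distributed.launch --nproc_per_node=8 run_pretraining.py \
#       --input-files data/*.feather --num_steps 100000 --batch_size 8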
def final_save(model, save_dir, vocab, cfg):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, 'model.yml'), 'w') as f:
f.write(cfg.dump())
with open(os.path.join(save_dir, 'vocab.json'), 'w') as f:
json.dump(vocab, f)
th.save(model.state_dict(), os.path.join(save_dir, 'model.params'))
logging.info('Statistics:')
old_names = os.listdir(save_dir)
for old_name in old_names:
new_name, long_hash = nlp.utils.misc.naming_convention(save_dir, old_name)
old_path = os.path.join(save_dir, old_name)
new_path = os.path.join(save_dir, new_name)
shutil.move(old_path, new_path)
file_size = os.path.getsize(new_path)
logging.info('\t{}/{} {} {}'.format(save_dir, new_name, long_hash, file_size))
def parameters_option(step_num, model, args, option='Saving', ctx_l=None):
"""Save or load the model parameter, marked by step_num."""
param_path = os.path.join(args.ckpt_dir, f'{step_num:07}.params')
logging.info(f'[Step {step_num}], {option} model params to/from {param_path}.')
if option == 'Saving':
th.save(model.state_dict(), param_path)
elif option == 'Loading':
model.load_state_dict(th.load(param_path, map_location=args.device))
else:
raise NotImplementedError('Unknown Option: {}'.format(option))
def states_option(step_num, optimizer, args, option='Saving'):
"""Save or load the trainer states, marked by step_num and local rank."""
state_path = os.path.join(args.ckpt_dir, f'{step_num:07}.states.{args.local_rank:02}')
logging.info(f'[Step {step_num}], {option} trainer states to/from {state_path}.')
if option == 'Saving':
th.save(optimizer.state_dict(), state_path)
elif option == 'Loading':
optimizer.load_state_dict(th.load(state_path))
else:
raise NotImplementedError('Unknown Option: {}'.format(option))
def set_seed(seed):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
th.manual_seed(seed)
th.cuda.manual_seed(seed)
th.backends.cudnn.deterministic = True
def get_world_size(args):
if args.local_rank != -1:
return distributed.get_world_size()
return 1
def collate_fn(indices, *, args, tbl):
batch = tbl.take(indices).to_pydict()
pad_fn = nlp.torch.data.batchify.Pad()
input_id = pad_fn(batch['quickthought1'] + batch['quickthought2'])
segment_id = th.zeros_like(input_id)
valid_length = th.tensor(batch['validlength1'] + batch['validlength2'])
mlm_positions = batch['mlmpositions1'] + batch['mlmpositions2']
# Masked positions with respect to flattened contextual_embedding (batch_size * seq_length, units)
seq_length = input_id.shape[1]
mlm_positions = [np.array(pos) + seq_length * i for i, pos in enumerate(mlm_positions)]
mlm_positions = th.tensor(np.concatenate(mlm_positions).astype(np.int64))
mlm_labels = batch['mlmlabels1'] + batch['mlmlabels2']
mlm_labels = th.tensor(np.concatenate(mlm_labels).astype(np.int64))
return input_id, segment_id, valid_length, mlm_positions, mlm_labels
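# Hedged worked example of the position flattening above: with seq_length = 128, an
# MLM position 5 in batch item 0 stays 5, while position 5 in batch item 2 becomes
# 5 + 2 * 128 = 261, so it indexes the flattened (batch_size * seq_length, units)
# contextual embedding directly.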
def train(args, *, tbl):
cfg, tokenizer, _, _ = nlp.models.bert.get_pretrained_bert(args.model_name, load_backbone=False,
load_mlm=False)
cfg = nlp.torch.models.bert.BertModel.get_cfg().clone_merge(cfg)
model = nlp.torch.models.bert.QTBertForPretrain(cfg)
model.to(args.device)
if args.start_step:
logging.info('Restart training from {}'.format(args.start_step))
parameters_option(args.start_step, model, args, 'Loading')
else:
model.apply(nlp.torch.models.bert.init_weights)
writer = None
if args.local_rank in (-1, 0):
writer = SummaryWriter(log_dir=os.path.join(args.ckpt_dir, 'tensorboard'))
# pin_memory=False due to lack of https://github.com/pytorch/pytorch/commit/54ce171f16c8859f829dde09f87c364c8a6b4130
sampler = RandomSampler(tbl) if args.local_rank == -1 else DistributedSampler(
tbl, seed=args.seed)
# batch_size // 2 for QuickThought
train_dataloader = DataLoader(np.arange(len(tbl)), sampler=sampler,
collate_fn=functools.partial(collate_fn, args=args, tbl=tbl),
batch_size=args.batch_size // 2,
num_workers=args.num_dataloader_workers, pin_memory=True)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay':
args.weight_decay
}, {
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay':
0.0
}]
optimizer_arguments = {"lr": args.lr}
if get_world_size(args) > 1 and args.ZeRO:
optimizer = OSS(params=model.parameters(), optim=nlp.torch.optimizers.FusedLANS,
**optimizer_arguments)
model = ShardedDataParallel(model, optimizer)
elif get_world_size(args) > 1:
optimizer = nlp.torch.optimizers.FusedLANS(optimizer_grouped_parameters,
**optimizer_arguments)
model = DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank, find_unused_parameters=True)
else:
optimizer = nlp.torch.optimizers.FusedLANS(optimizer_grouped_parameters,
**optimizer_arguments)
save_interval = args.ckpt_interval
logging.info(f'#Total Training Steps={args.num_steps}, '
f'Warmup Steps={args.warmup_ratio * args.num_steps}, '
f'Save Interval={save_interval}')
scheduler = nlp.torch.optimizers.schedules.get_warmup_linear_const_decay_poly_schedule(
optimizer, total_steps=args.num_steps, warmup_ratio=args.warmup_ratio,
const_ratio=args.const_ratio)
if args.start_step:
logging.info(f'Restart training from {args.start_step}')
states_option(args.start_step, optimizer, args, 'Loading')
ce_loss_fn = th.nn.CrossEntropyLoss()
step_num = args.start_step
if args.phase2:
step_num -= args.phase1_num_steps
running_num_tks, running_grad_norm = 0, 0
running_mlm_loss, running_qt_loss, running_mlm_acc, running_qt_acc = 0, 0, 0, 0
train_start_time = time.time()
tic = time.time()
model.zero_grad()
if get_world_size(args) > 1 and args.ZeRO:
scaler = ShardedGradScaler() if args.fp16 else None
else:
scaler = th.cuda.amp.GradScaler() if args.fp16 else None
train_iter = repeat(train_dataloader, set_epoch=args.local_rank != -1)
while step_num < args.num_steps:
step_num += 1
for accum_step in range(args.num_accumulated):
            # Fetch a single batch for this accumulation step and move all arrays
            # to the target device.
            (input_id, segment_id, valid_length, mlm_positions,
             mlm_labels) = (arr.to(args.device) for arr in next(train_iter))
model.train()
accumulation = ((accum_step + 1) % args.num_accumulated != 0)
with model.no_sync() if get_world_size(args) > 1 and accumulation else suppress():
with th.cuda.amp.autocast(enabled=args.fp16):
_, pooled_out, mlm_scores, qt_similarity = model(input_id, segment_id,
valid_length, mlm_positions)
mlm_loss = ce_loss_fn(mlm_scores, mlm_labels)
qt_label = th.arange(len(input_id) // 2, device=args.device)
qt_loss = ce_loss_fn(qt_similarity, qt_label)
loss = mlm_loss + qt_loss
if args.num_accumulated > 1:
loss = loss / args.num_accumulated
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
with th.no_grad():
qt_acc = (qt_similarity.argmax(dim=1) == qt_label).sum() / (len(input_id) // 2)
mlm_acc = (mlm_scores.argmax(dim=1) == mlm_labels).sum() / len(mlm_labels)
# Gather information from all workers for accurate statistics
reduced_num_tokens = valid_length.sum()
if get_world_size(args) > 1:
distributed.all_reduce(reduced_num_tokens)
reduced_num_mlm_tokens = th.tensor(len(mlm_labels), device=args.device)
if get_world_size(args) > 1:
distributed.all_reduce(reduced_num_mlm_tokens)
reduced_loss_mlm = mlm_loss.detach().clone() * len(mlm_labels) / reduced_num_mlm_tokens
if get_world_size(args) > 1:
distributed.all_reduce(reduced_loss_mlm)
reduced_acc_mlm = mlm_acc.detach().clone() * len(mlm_labels) / reduced_num_mlm_tokens
if get_world_size(args) > 1:
distributed.all_reduce(reduced_acc_mlm)
reduced_bs = th.tensor(len(input_id), device=args.device)
if get_world_size(args) > 1:
distributed.all_reduce(reduced_bs)
reduced_loss_qt = qt_loss.detach().clone() * len(input_id) / reduced_bs
if get_world_size(args) > 1:
distributed.all_reduce(reduced_loss_qt)
reduced_acc_qt = qt_acc.detach().clone() * len(input_id) / reduced_bs
if get_world_size(args) > 1:
distributed.all_reduce(reduced_acc_qt)
running_num_tks += reduced_num_tokens.item()
running_mlm_loss += reduced_loss_mlm.item()
running_mlm_acc += reduced_acc_mlm.item()
running_qt_loss += reduced_loss_qt.item()
running_qt_acc += reduced_acc_qt.item()
if not accumulation:
if args.fp16:
scaler.unscale_(optimizer) # unscale for gradient clipping
if get_world_size(args) > 1 and args.ZeRO:
total_norm = optimizer.clip_grad_norm(args.max_grad_norm)
else:
total_norm = th.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if get_world_size(args) > 1:
distributed.all_reduce(total_norm)
total_norm /= get_world_size(args)
running_grad_norm += total_norm
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
with warnings.catch_warnings():
# Scheduler may warn if optimizer.step() call is skipped
# due to invalid gradients detected by scaler.
warnings.simplefilter("ignore", UserWarning)
scheduler.step()
optimizer.zero_grad(set_to_none=True)
if step_num % args.log_interval == 0:
toc = time.time()
wps = running_num_tks / (toc - tic)
eta = (args.num_steps - step_num) / (step_num / (toc - train_start_time)) / 3600
interval = args.log_interval * args.num_accumulated
logging.info(f'[Step {step_num}], LR={scheduler.get_last_lr()[0]:.6f}, '
f'Loss MLM/QT={running_mlm_loss / interval:.4f}/'
f'{running_qt_loss / interval:.4f}, '
f'Acc MLM/QT={running_mlm_acc / interval:.4f}/'
f'{running_qt_acc / interval:.4f}, '
f'Grad_norm={running_grad_norm / interval:.4f}, '
f'Time cost={toc - tic:.2f}, '
f'Throughput={wps:.2f} tokens/s, ETA={eta:.2f}h')
if args.local_rank in (-1, 0):
writer.add_scalar('Throughput_wps', wps, step_num)
writer.add_scalar('Loss/MLM', running_mlm_loss / interval, step_num)
writer.add_scalar('Loss/QT', running_qt_loss / interval, step_num)
writer.add_scalar('Acc/MLM', running_mlm_acc / interval, step_num)
writer.add_scalar('Acc/QT', running_qt_acc / interval, step_num)
writer.add_scalar('LR', scheduler.get_last_lr()[0], step_num)
writer.add_scalar('Grad_norm', running_grad_norm / interval, step_num)
running_num_tks, running_grad_norm = 0, 0
running_mlm_loss, running_qt_loss, running_mlm_acc, running_qt_acc = 0, 0, 0, 0
tic = time.time()
# Saving
if step_num % save_interval == 0 or step_num >= args.num_steps:
states_option(step_num, optimizer, args, 'Saving')
if args.local_rank in (0, -1):
parameters_option(step_num, model, args, 'Saving')
logging.info('Finish training step: %d', step_num)
train_end_time = time.time()
logging.info('Train cost={:.1f} s'.format(train_end_time - train_start_time))
if args.local_rank in (0, -1):
save_dir = os.path.join(args.ckpt_dir, args.model_name)
final_save(model, save_dir, tokenizer.vocab, cfg)
def main():
args = parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
nlp.utils.misc.logging_config(args.ckpt_dir, name='pretrain_bert_' + str(args.local_rank),
level=level, console=(args.local_rank in (0, -1)))
# Setup CUDA, GPU & distributed training
local_size = 1
if th.cuda.is_available() and args.cuda:
th.cuda.set_device(args.local_rank if args.local_rank != -1 else 0)
args.device = th.device("cuda", args.local_rank if args.local_rank != -1 else 0)
if args.local_rank != -1:
distributed.init_process_group(backend='nccl')
local_size = th.cuda.device_count()
else:
args.device = th.device("cpu")
if args.local_rank != -1:
distributed.init_process_group(backend='gloo')
logging.info(args)
logging.debug('Random seed set to {}'.format(args.seed))
set_seed(args.seed)
logging.info(f'Training info: num_workers: {get_world_size(args)}, '
f'local rank: {args.local_rank}')
train_tbl_id = np.random.bytes(20).hex()
if args.local_rank not in (-1, 0):
distributed.barrier() # Wait for dataset
train_tbl = nlp.utils.shm.load(args.mmap_folder / train_tbl_id)
else: # Main process
if args.local_rank != -1 and (args.mmap_folder / train_tbl_id / 'meta.pkl').exists():
distributed.barrier() # Indicate dataset is ready
train_tbl = nlp.utils.shm.load(args.mmap_folder / train_tbl_id)
else:
(args.mmap_folder / train_tbl_id).mkdir(exist_ok=True, parents=True)
ds = pa.dataset.dataset(args.input_files, format='feather')
# Without combining chunks tbl.take is 1000x slower
train_tbl = ds.to_table().combine_chunks()
if args.local_rank != -1:
nlp.utils.shm.serialize(args.mmap_folder / train_tbl_id, train_tbl)
distributed.barrier() # Indicate dataset is ready
del train_tbl
train_tbl = nlp.utils.shm.load(args.mmap_folder / train_tbl_id)
step_size = args.batch_size * args.num_accumulated * get_world_size(args)
logging.info(f'Dataset has {len(train_tbl)} rows.')
logging.info(f'Sampling {step_size} rows per step ({step_size/len(train_tbl)*100:.2f}% data)')
logging.info(f'Will iterate over the dataset during {args.num_steps} training steps '
f'{args.num_steps * step_size/len(train_tbl):.2f} times.')
train(args, tbl=train_tbl)
if __name__ == '__main__':
main()
| 47.284753
| 120
| 0.635165
|
754f911b750678fd7de9bd04ce4a4de6f1b78d7e
| 18,940
|
py
|
Python
|
test/cpython/test_cmath.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/cpython/test_cmath.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/cpython/test_cmath.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
from test.test_support import run_unittest
from test.test_math import parse_testfile
import unittest
import os, sys
import cmath, math
from cmath import phase, polar, rect, pi
INF = float('inf')
NAN = float('nan')
# Pyston change: pull testcases file from source directory
# locate file with test values
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
test_dir = os.path.dirname(file) or os.curdir
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
complex_zeros = [complex(x, y) for x in [0.0, -0.0] for y in [0.0, -0.0]]
complex_infinities = [complex(x, y) for x, y in [
(INF, 0.0), # 1st quadrant
(INF, 2.3),
(INF, INF),
(2.3, INF),
(0.0, INF),
(-0.0, INF), # 2nd quadrant
(-2.3, INF),
(-INF, INF),
(-INF, 2.3),
(-INF, 0.0),
(-INF, -0.0), # 3rd quadrant
(-INF, -2.3),
(-INF, -INF),
(-2.3, -INF),
(-0.0, -INF),
(0.0, -INF), # 4th quadrant
(2.3, -INF),
(INF, -INF),
(INF, -2.3),
(INF, -0.0)
]]
complex_nans = [complex(x, y) for x, y in [
(NAN, -INF),
(NAN, -2.3),
(NAN, -0.0),
(NAN, 0.0),
(NAN, 2.3),
(NAN, INF),
(-INF, NAN),
(-2.3, NAN),
(-0.0, NAN),
(0.0, NAN),
(2.3, NAN),
(INF, NAN)
]]
class CMathTests(unittest.TestCase):
# list of all functions in cmath
test_functions = [getattr(cmath, fname) for fname in [
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
'cos', 'cosh', 'exp', 'log', 'log10', 'sin', 'sinh',
'sqrt', 'tan', 'tanh']]
# test first and second arguments independently for 2-argument log
test_functions.append(lambda x : cmath.log(x, 1729. + 0j))
test_functions.append(lambda x : cmath.log(14.-27j, x))
def setUp(self):
self.test_values = open(test_file)
def tearDown(self):
self.test_values.close()
def rAssertAlmostEqual(self, a, b, rel_err = 2e-15, abs_err = 5e-323,
msg=None):
"""Fail if the two floating-point numbers are not almost equal.
Determine whether floating-point values a and b are equal to within
a (small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps.
"""
# special values testing
if math.isnan(a):
if math.isnan(b):
return
self.fail(msg or '{!r} should be nan'.format(b))
if math.isinf(a):
if a == b:
return
self.fail(msg or 'finite result where infinity expected: '
'expected {!r}, got {!r}'.format(a, b))
# if both a and b are zero, check whether they have the same sign
# (in theory there are examples where it would be legitimate for a
# and b to have opposite signs; in practice these hardly ever
# occur).
if not a and not b:
if math.copysign(1., a) != math.copysign(1., b):
self.fail(msg or 'zero has wrong sign: expected {!r}, '
'got {!r}'.format(a, b))
# if a-b overflows, or b is infinite, return False. Again, in
# theory there are examples where a is within a few ulps of the
# max representable float, and then b could legitimately be
# infinite. In practice these examples are rare.
try:
absolute_error = abs(b-a)
except OverflowError:
pass
else:
# test passes if either the absolute error or the relative
# error is sufficiently small. The defaults amount to an
# error of between 9 ulps and 19 ulps on an IEEE-754 compliant
# machine.
if absolute_error <= max(abs_err, rel_err * abs(a)):
return
self.fail(msg or
'{!r} and {!r} are not sufficiently close'.format(a, b))
def test_constants(self):
e_expected = 2.71828182845904523536
pi_expected = 3.14159265358979323846
self.assertAlmostEqual(cmath.pi, pi_expected, places=9,
msg="cmath.pi is {}; should be {}".format(cmath.pi, pi_expected))
self.assertAlmostEqual(cmath.e, e_expected, places=9,
msg="cmath.e is {}; should be {}".format(cmath.e, e_expected))
def test_user_object(self):
# Test automatic calling of __complex__ and __float__ by cmath
# functions
# some random values to use as test values; we avoid values
# for which any of the functions in cmath is undefined
# (i.e. 0., 1., -1., 1j, -1j) or would cause overflow
cx_arg = 4.419414439 + 1.497100113j
flt_arg = -6.131677725
# a variety of non-complex numbers, used to check that
# non-complex return values from __complex__ give an error
non_complexes = ["not complex", 1, 5L, 2., None,
object(), NotImplemented]
# Now we introduce a variety of classes whose instances might
# end up being passed to the cmath functions
# usual case: new-style class implementing __complex__
class MyComplex(object):
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# old-style class implementing __complex__
class MyComplexOS:
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# classes for which __complex__ raises an exception
class SomeException(Exception):
pass
class MyComplexException(object):
def __complex__(self):
raise SomeException
class MyComplexExceptionOS:
def __complex__(self):
raise SomeException
# some classes not providing __float__ or __complex__
class NeitherComplexNorFloat(object):
pass
class NeitherComplexNorFloatOS:
pass
class MyInt(object):
def __int__(self): return 2
def __long__(self): return 2L
def __index__(self): return 2
class MyIntOS:
def __int__(self): return 2
def __long__(self): return 2L
def __index__(self): return 2
# other possible combinations of __float__ and __complex__
# that should work
class FloatAndComplex(object):
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class FloatAndComplexOS:
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class JustFloat(object):
def __float__(self):
return flt_arg
class JustFloatOS:
def __float__(self):
return flt_arg
for f in self.test_functions:
# usual usage
self.assertEqual(f(MyComplex(cx_arg)), f(cx_arg))
self.assertEqual(f(MyComplexOS(cx_arg)), f(cx_arg))
# other combinations of __float__ and __complex__
self.assertEqual(f(FloatAndComplex()), f(cx_arg))
self.assertEqual(f(FloatAndComplexOS()), f(cx_arg))
self.assertEqual(f(JustFloat()), f(flt_arg))
self.assertEqual(f(JustFloatOS()), f(flt_arg))
# TypeError should be raised for classes not providing
# either __complex__ or __float__, even if they provide
# __int__, __long__ or __index__. An old-style class
# currently raises AttributeError instead of a TypeError;
# this could be considered a bug.
self.assertRaises(TypeError, f, NeitherComplexNorFloat())
self.assertRaises(TypeError, f, MyInt())
self.assertRaises(Exception, f, NeitherComplexNorFloatOS())
self.assertRaises(Exception, f, MyIntOS())
# non-complex return value from __complex__ -> TypeError
for bad_complex in non_complexes:
self.assertRaises(TypeError, f, MyComplex(bad_complex))
self.assertRaises(TypeError, f, MyComplexOS(bad_complex))
# exceptions in __complex__ should be propagated correctly
self.assertRaises(SomeException, f, MyComplexException())
self.assertRaises(SomeException, f, MyComplexExceptionOS())
def test_input_type(self):
# ints and longs should be acceptable inputs to all cmath
# functions, by virtue of providing a __float__ method
for f in self.test_functions:
for arg in [2, 2L, 2.]:
self.assertEqual(f(arg), f(arg.__float__()))
# but strings should give a TypeError
for f in self.test_functions:
for arg in ["a", "long_string", "0", "1j", ""]:
self.assertRaises(TypeError, f, arg)
def test_cmath_matches_math(self):
# check that corresponding cmath and math functions are equal
# for floats in the appropriate range
# test_values in (0, 1)
test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]
# test_values for functions defined on [-1., 1.]
unit_interval = test_values + [-x for x in test_values] + \
[0., 1., -1.]
# test_values for log, log10, sqrt
positive = test_values + [1.] + [1./x for x in test_values]
nonnegative = [0.] + positive
# test_values for functions defined on the whole real line
real_line = [0.] + positive + [-x for x in positive]
test_functions = {
'acos' : unit_interval,
'asin' : unit_interval,
'atan' : real_line,
'cos' : real_line,
'cosh' : real_line,
'exp' : real_line,
'log' : positive,
'log10' : positive,
'sin' : real_line,
'sinh' : real_line,
'sqrt' : nonnegative,
'tan' : real_line,
'tanh' : real_line}
for fn, values in test_functions.items():
float_fn = getattr(math, fn)
complex_fn = getattr(cmath, fn)
for v in values:
z = complex_fn(v)
self.rAssertAlmostEqual(float_fn(v), z.real)
self.assertEqual(0., z.imag)
# test two-argument version of log with various bases
for base in [0.5, 2., 10.]:
for v in positive:
z = cmath.log(v, base)
self.rAssertAlmostEqual(math.log(v, base), z.real)
self.assertEqual(0., z.imag)
def test_specific_values(self):
if not float.__getformat__("double").startswith("IEEE"):
self.skipTest('needs IEEE double')
def rect_complex(z):
"""Wrapped version of rect that accepts a complex number instead of
two float arguments."""
return cmath.rect(z.real, z.imag)
def polar_complex(z):
"""Wrapped version of polar that returns a complex number instead of
two floats."""
return complex(*polar(z))
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
arg = complex(ar, ai)
expected = complex(er, ei)
if fn == 'rect':
function = rect_complex
elif fn == 'polar':
function = polar_complex
else:
function = getattr(cmath, fn)
if 'divide-by-zero' in flags or 'invalid' in flags:
try:
actual = function(arg)
except ValueError:
continue
else:
self.fail('ValueError not raised in test '
'{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))
if 'overflow' in flags:
try:
actual = function(arg)
except OverflowError:
continue
else:
self.fail('OverflowError not raised in test '
'{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))
actual = function(arg)
if 'ignore-real-sign' in flags:
actual = complex(abs(actual.real), actual.imag)
expected = complex(abs(expected.real), expected.imag)
if 'ignore-imag-sign' in flags:
actual = complex(actual.real, abs(actual.imag))
expected = complex(expected.real, abs(expected.imag))
# for the real part of the log function, we allow an
# absolute error of up to 2e-15.
if fn in ('log', 'log10'):
real_abs_err = 2e-15
else:
real_abs_err = 5e-323
error_message = (
'{}: {}(complex({!r}, {!r}))\n'
'Expected: complex({!r}, {!r})\n'
'Received: complex({!r}, {!r})\n'
'Received value insufficiently close to expected value.'
).format(id, fn, ar, ai,
expected.real, expected.imag,
actual.real, actual.imag)
self.rAssertAlmostEqual(expected.real, actual.real,
abs_err=real_abs_err,
msg=error_message)
self.rAssertAlmostEqual(expected.imag, actual.imag,
msg=error_message)
def assertCISEqual(self, a, b):
eps = 1E-7
if abs(a[0] - b[0]) > eps or abs(a[1] - b[1]) > eps:
self.fail((a ,b))
def test_polar(self):
self.assertCISEqual(polar(0), (0., 0.))
self.assertCISEqual(polar(1.), (1., 0.))
self.assertCISEqual(polar(-1.), (1., pi))
self.assertCISEqual(polar(1j), (1., pi/2))
self.assertCISEqual(polar(-1j), (1., -pi/2))
def test_phase(self):
self.assertAlmostEqual(phase(0), 0.)
self.assertAlmostEqual(phase(1.), 0.)
self.assertAlmostEqual(phase(-1.), pi)
self.assertAlmostEqual(phase(-1.+1E-300j), pi)
self.assertAlmostEqual(phase(-1.-1E-300j), -pi)
self.assertAlmostEqual(phase(1j), pi/2)
self.assertAlmostEqual(phase(-1j), -pi/2)
# zeros
self.assertEqual(phase(complex(0.0, 0.0)), 0.0)
self.assertEqual(phase(complex(0.0, -0.0)), -0.0)
self.assertEqual(phase(complex(-0.0, 0.0)), pi)
self.assertEqual(phase(complex(-0.0, -0.0)), -pi)
# infinities
self.assertAlmostEqual(phase(complex(-INF, -0.0)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -2.3)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -INF)), -0.75*pi)
self.assertAlmostEqual(phase(complex(-2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(-0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(INF, -INF)), -pi/4)
self.assertEqual(phase(complex(INF, -2.3)), -0.0)
self.assertEqual(phase(complex(INF, -0.0)), -0.0)
self.assertEqual(phase(complex(INF, 0.0)), 0.0)
self.assertEqual(phase(complex(INF, 2.3)), 0.0)
self.assertAlmostEqual(phase(complex(INF, INF)), pi/4)
self.assertAlmostEqual(phase(complex(2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-INF, INF)), 0.75*pi)
self.assertAlmostEqual(phase(complex(-INF, 2.3)), pi)
self.assertAlmostEqual(phase(complex(-INF, 0.0)), pi)
# real or imaginary part NaN
for z in complex_nans:
self.assertTrue(math.isnan(phase(z)))
def test_abs(self):
# zeros
for z in complex_zeros:
self.assertEqual(abs(z), 0.0)
# infinities
for z in complex_infinities:
self.assertEqual(abs(z), INF)
# real or imaginary part NaN
self.assertEqual(abs(complex(NAN, -INF)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, -2.3))))
self.assertTrue(math.isnan(abs(complex(NAN, -0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 2.3))))
self.assertEqual(abs(complex(NAN, INF)), INF)
self.assertEqual(abs(complex(-INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(-2.3, NAN))))
self.assertTrue(math.isnan(abs(complex(-0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(2.3, NAN))))
self.assertEqual(abs(complex(INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, NAN))))
# result overflows
if float.__getformat__("double").startswith("IEEE"):
self.assertRaises(OverflowError, abs, complex(1.4e308, 1.4e308))
def assertCEqual(self, a, b):
eps = 1E-7
if abs(a.real - b[0]) > eps or abs(a.imag - b[1]) > eps:
self.fail((a ,b))
def test_rect(self):
self.assertCEqual(rect(0, 0), (0, 0))
self.assertCEqual(rect(1, 0), (1., 0))
self.assertCEqual(rect(1, -pi), (-1., 0))
self.assertCEqual(rect(1, pi/2), (0, 1.))
self.assertCEqual(rect(1, -pi/2), (0, -1.))
def test_isnan(self):
self.assertFalse(cmath.isnan(1))
self.assertFalse(cmath.isnan(1j))
self.assertFalse(cmath.isnan(INF))
self.assertTrue(cmath.isnan(NAN))
self.assertTrue(cmath.isnan(complex(NAN, 0)))
self.assertTrue(cmath.isnan(complex(0, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, INF)))
self.assertTrue(cmath.isnan(complex(INF, NAN)))
def test_isinf(self):
self.assertFalse(cmath.isinf(1))
self.assertFalse(cmath.isinf(1j))
self.assertFalse(cmath.isinf(NAN))
self.assertTrue(cmath.isinf(INF))
self.assertTrue(cmath.isinf(complex(INF, 0)))
self.assertTrue(cmath.isinf(complex(0, INF)))
self.assertTrue(cmath.isinf(complex(INF, INF)))
self.assertTrue(cmath.isinf(complex(NAN, INF)))
self.assertTrue(cmath.isinf(complex(INF, NAN)))
def test_main():
run_unittest(CMathTests)
if __name__ == "__main__":
test_main()
| 39.051546
| 80
| 0.562091
|
63f5895326f200d3462530f2df6d638cccdc88ec
| 2,308
|
py
|
Python
|
test/vanilla/low-level/Expected/AcceptanceTests/BodyDateTimeRfc1123LowLevel/bodydatetimerfc1123lowlevel/rest/datetimerfc1123/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/low-level/Expected/AcceptanceTests/BodyDateTimeRfc1123LowLevel/bodydatetimerfc1123lowlevel/rest/datetimerfc1123/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/low-level/Expected/AcceptanceTests/BodyDateTimeRfc1123LowLevel/bodydatetimerfc1123lowlevel/rest/datetimerfc1123/__init__.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._request_builders_py3 import build_get_null_request
from ._request_builders_py3 import build_get_invalid_request
from ._request_builders_py3 import build_get_overflow_request
from ._request_builders_py3 import build_get_underflow_request
from ._request_builders_py3 import build_put_utc_max_date_time_request
from ._request_builders_py3 import build_get_utc_lowercase_max_date_time_request
from ._request_builders_py3 import build_get_utc_uppercase_max_date_time_request
from ._request_builders_py3 import build_put_utc_min_date_time_request
from ._request_builders_py3 import build_get_utc_min_date_time_request
except (SyntaxError, ImportError):
from ._request_builders import build_get_null_request # type: ignore
from ._request_builders import build_get_invalid_request # type: ignore
from ._request_builders import build_get_overflow_request # type: ignore
from ._request_builders import build_get_underflow_request # type: ignore
from ._request_builders import build_put_utc_max_date_time_request # type: ignore
from ._request_builders import build_get_utc_lowercase_max_date_time_request # type: ignore
from ._request_builders import build_get_utc_uppercase_max_date_time_request # type: ignore
from ._request_builders import build_put_utc_min_date_time_request # type: ignore
from ._request_builders import build_get_utc_min_date_time_request # type: ignore
__all__ = [
"build_get_null_request",
"build_get_invalid_request",
"build_get_overflow_request",
"build_get_underflow_request",
"build_put_utc_max_date_time_request",
"build_get_utc_lowercase_max_date_time_request",
"build_get_utc_uppercase_max_date_time_request",
"build_put_utc_min_date_time_request",
"build_get_utc_min_date_time_request",
]
| 56.292683
| 96
| 0.771664
|
6dee37b256ec3416d0e14ad49c7d8c106de2f620
| 170
|
py
|
Python
|
tests/test_three.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
tests/test_three.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
tests/test_three.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
import pytest
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_adunare(x, y):
assert x + y == y + x
| 14.166667
| 36
| 0.7
|
9a718a121740b6bccd7f600a698ab66ab1dcb512
| 7,483
|
py
|
Python
|
test_bakthat.py
|
snario/bakthat
|
33f0a9b56d4273fe7a74bdb50acbe5ea5d51c384
|
[
"MIT"
] | 144
|
2015-01-07T19:43:43.000Z
|
2022-03-13T02:06:16.000Z
|
test_bakthat.py
|
snario/bakthat
|
33f0a9b56d4273fe7a74bdb50acbe5ea5d51c384
|
[
"MIT"
] | 13
|
2015-02-13T01:57:18.000Z
|
2021-05-25T20:07:54.000Z
|
test_bakthat.py
|
snario/bakthat
|
33f0a9b56d4273fe7a74bdb50acbe5ea5d51c384
|
[
"MIT"
] | 31
|
2015-03-11T18:56:21.000Z
|
2022-01-14T09:45:00.000Z
|
# -*- encoding: utf-8 -*-
import bakthat
import tempfile
import hashlib
import os
import time
import unittest
import logging
log = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.addFilter(bakthat.BakthatFilter())
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class BakthatTestCase(unittest.TestCase):
def setUp(self):
self.test_file = tempfile.NamedTemporaryFile()
self.test_file.write("Bakthat Test File")
self.test_file.seek(0)
self.test_filename = self.test_file.name.split("/")[-1]
self.test_hash = hashlib.sha1(self.test_file.read()).hexdigest()
self.password = "bakthat_encrypted_test"
def test_internals(self):
with self.assertRaises(Exception):
bakthat._interval_string_to_seconds("1z")
self.assertEqual(bakthat._interval_string_to_seconds("2D1h"), 86400 * 2 + 3600)
self.assertEqual(bakthat._interval_string_to_seconds("3M"), 3*30*86400)
def test_keyvalue_helper(self):
from bakthat.helper import KeyValue
kv = KeyValue()
test_string = "Bakthat Test str"
test_key = "bakthat-unittest"
test_key_enc = "bakthat-unittest-testenc"
test_key2 = "itshouldfail"
test_password = "bakthat-password"
kv.set_key(test_key, test_string)
kv.set_key(test_key_enc, test_string, password=test_password)
self.assertEqual(test_string, kv.get_key(test_key))
self.assertEqual(test_string, kv.get_key(test_key_enc, password=test_password))
#from urllib2 import urlopen, HTTPError
#test_url = kv.get_key_url(test_key, 10)
#self.assertEqual(json.loads(urlopen(test_url).read()), test_string)
#time.sleep(30)
#with self.assertRaises(HTTPError):
# urlopen(test_url).read()
kv.delete_key(test_key_enc)
kv.delete_key(test_key)
self.assertEqual(kv.get_key(test_key), None)
self.assertEqual(kv.get_key(test_key2), None)
def test_s3_backup_restore(self):
backup_data = bakthat.backup(self.test_file.name, "s3", password="")
log.info(backup_data)
#self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
# self.test_filename)
bakthat.restore(self.test_filename, "s3")
restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
self.assertEqual(self.test_hash, restored_hash)
os.remove(self.test_filename)
bakthat.delete(self.test_filename, "s3")
#self.assertEqual(bakthat.match_filename(self.test_filename), [])
def test_s3_delete_older_than(self):
backup_res = bakthat.backup(self.test_file.name, "s3", password="")
#self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
# self.test_filename)
bakthat.restore(self.test_filename, "s3")
restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
self.assertEqual(self.test_hash, restored_hash)
os.remove(self.test_filename)
test_deleted = bakthat.delete_older_than(self.test_filename, "1Y", destination="s3")
self.assertEqual(test_deleted, [])
time.sleep(10)
test_deleted = bakthat.delete_older_than(self.test_filename, "9s", destination="s3")
key_deleted = test_deleted[0]
self.assertEqual(key_deleted.stored_filename, backup_res.stored_filename)
#self.assertEqual(bakthat.match_filename(self.test_filename), [])
def test_s3_encrypted_backup_restore(self):
bakthat.backup(self.test_file.name, "s3", password=self.password)
#self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
# self.test_filename)
# Check if stored file is encrypted
#self.assertTrue(bakthat.match_filename(self.test_filename, "s3")[0]["is_enc"])
bakthat.restore(self.test_filename, "s3", password=self.password)
restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
self.assertEqual(self.test_hash, restored_hash)
os.remove(self.test_filename)
bakthat.delete(self.test_filename, "s3")
#self.assertEqual(bakthat.match_filename(self.test_filename), [])
def test_glacier_backup_restore(self):
if raw_input("Test glacier upload/download ? It can take up to 4 hours ! (y/N): ").lower() == "y":
# Backup dummy file
bakthat.backup(self.test_file.name, "glacier", password="")
# Check that file is showing up in bakthat ls
#self.assertEqual(bakthat.match_filename(self.test_filename, "glacier")[0]["filename"],
# self.test_filename)
# TODO replace by a Backups.search
# We initialize glacier backend
# to check that the file appear in both local and remote (S3) inventory
#glacier_backend = GlacierBackend(None)
#archives = glacier_backend.load_archives()
#archives_s3 = glacier_backend.load_archives_from_s3()
# Check that local and remote custom inventory are equal
#self.assertEqual(archives, archives_s3)
# Next we check that the file is stored in both inventories
#inventory_key_name = bakthat.match_filename(self.test_filename, "glacier")[0]["key"]
#self.assertTrue(inventory_key_name in [a.get("filename") for a in archives])
#self.assertTrue(inventory_key_name in [a.get("filename") for a in archives_s3])
# Restore backup
job = bakthat.restore(self.test_filename, "glacier", job_check=True)
# Check that a job is initiated
self.assertEqual(job.__dict__["action"], "ArchiveRetrieval")
self.assertEqual(job.__dict__["status_code"], "InProgress")
while 1:
# Check every ten minutes if the job is done
result = bakthat.restore(self.test_filename, "glacier")
# If job is done, we can download the file
if result:
restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
# Check if the hash of the restored file is equal to initial file hash
self.assertEqual(self.test_hash, restored_hash)
os.remove(self.test_filename)
# Now, we can delete the restored file
bakthat.delete(self.test_filename, "glacier")
# Check that the file is deleted
#self.assertEqual(bakthat.match_filename(self.test_filename, "glacier"), [])
# TODO Backups.search
#archives = glacier_backend.load_archives()
#archives_s3 = glacier_backend.load_archives_from_s3()
# Check if the file has been removed from both archives
#self.assertEqual(archives, archives_s3)
#self.assertTrue(inventory_key_name not in archives)
#self.assertTrue(inventory_key_name not in archives_s3)
break
else:
time.sleep(600)
if __name__ == '__main__':
unittest.main()
| 37.984772
| 106
| 0.644795
|
6de5e5f881003b4ed8f0ede8c122bc61e8202a00
| 309
|
py
|
Python
|
examples/web/python-cherrypy-minimal/server.py
|
gtrafimenkov/dev-toolkit
|
8d92a40db5a4dffdc3506992aba49bc9bea46e2f
|
[
"MIT"
] | null | null | null |
examples/web/python-cherrypy-minimal/server.py
|
gtrafimenkov/dev-toolkit
|
8d92a40db5a4dffdc3506992aba49bc9bea46e2f
|
[
"MIT"
] | null | null | null |
examples/web/python-cherrypy-minimal/server.py
|
gtrafimenkov/dev-toolkit
|
8d92a40db5a4dffdc3506992aba49bc9bea46e2f
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Gennady Trafimenkov
import cherrypy
class HelloWorld(object):
@cherrypy.expose
def index(self):
return "Hello World!"
cherrypy.config.update({'server.socket_host': '0.0.0.0', 'server.socket_port': 9090})
cherrypy.quickstart(HelloWorld())
| 23.769231
| 85
| 0.721683
|
423db36f94b2c131e92c3a9b7e79848ddf2b952f
| 1,751
|
py
|
Python
|
models/movies.py
|
wanderindev/udacity-casting-agency
|
a123ff26ffc565bfff4f4f829ae7613a6ccb366d
|
[
"MIT"
] | 1
|
2021-01-10T19:34:24.000Z
|
2021-01-10T19:34:24.000Z
|
models/movies.py
|
wanderindev/udacity-casting-agency
|
a123ff26ffc565bfff4f4f829ae7613a6ccb366d
|
[
"MIT"
] | 1
|
2021-04-30T21:07:42.000Z
|
2021-04-30T21:07:42.000Z
|
models/movies.py
|
wanderindev/udacity-casting-agency
|
a123ff26ffc565bfff4f4f829ae7613a6ccb366d
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Union
from db import db
from models.actors import ActorModel
from models.model_mixin import ModelMixin
MovieJSON = Dict[str, Union[int, str, List[str]]]
"""
Create an association table for the many-to-many relation
between movies and actors
"""
movies_actors = db.Table(
"movies_actors",
db.Column(
"movie_id", db.Integer, db.ForeignKey("movies.id"), nullable=False
),
db.Column(
"actor_id", db.Integer, db.ForeignKey("actors.id"), nullable=False
),
db.PrimaryKeyConstraint("movie_id", "actor_id"),
)
class MovieModel(db.Model, ModelMixin):
"""SQLAlchemy model for movies"""
__tablename__ = "movies"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50), nullable=False)
release_date = db.Column(db.Date, nullable=False)
actors = db.relationship(
ActorModel,
secondary="movies_actors",
lazy=True,
backref=db.backref("movies", lazy=True),
)
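# For example (illustrative objects; persistence helpers come from ModelMixin):
#   movie.actors.append(actor)   # adds a (movie_id, actor_id) row to movies_actors
#   actor.movies                 # available through the backref defined above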
def __init__(self, **kwargs):
super(MovieModel, self).__init__(**kwargs)
@classmethod
def find_all(cls) -> List["MovieModel"]:
return cls.query.order_by(MovieModel.title).all()
@classmethod
def find_by_id(cls, _id: int) -> "MovieModel":
return cls.query.filter_by(id=_id).first()
@classmethod
def find_by_title(cls, title: str) -> "MovieModel":
return cls.query.filter_by(title=title).first()
def json(self) -> MovieJSON:
return {
"id": self.id,
"title": self.title,
"release_date": self.release_date,
"actors": [actor.json() for actor in self.actors],
}
| 28.241935
| 75
| 0.618504
|
002d19b388f6b51aa562875c65c88b7f28c6c576
| 115
|
py
|
Python
|
lib/pylint/test/regrtest_data/no_stdout_encoding.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 3
|
2018-11-25T01:09:55.000Z
|
2021-08-24T01:56:36.000Z
|
lib/pylint/test/regrtest_data/no_stdout_encoding.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 7
|
2021-02-08T20:22:15.000Z
|
2022-03-11T23:19:41.000Z
|
lib/pylint/test/regrtest_data/no_stdout_encoding.py
|
willemneal/Docky
|
d3504e1671b4a6557468234c263950bfab461ce4
|
[
"MIT"
] | 3
|
2018-11-09T03:38:09.000Z
|
2020-02-24T06:26:10.000Z
|
#-*-coding:iso-8859-1-*-
class test:
def __init__ (self, dir):
testString = u"répertoire :\n%s !"%dir
| 19.166667
| 46
| 0.582609
|
6737923d21193df7f8b256e97cb723735421bf35
| 511
|
py
|
Python
|
testprojects/src/python/interpreter_selection/echo_interpreter_version.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | 94
|
2015-01-15T21:24:20.000Z
|
2022-02-16T16:55:43.000Z
|
testprojects/src/python/interpreter_selection/echo_interpreter_version.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | 5
|
2020-07-18T01:04:43.000Z
|
2021-05-10T08:40:56.000Z
|
testprojects/src/python/interpreter_selection/echo_interpreter_version.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | 47
|
2015-02-25T02:20:07.000Z
|
2022-03-21T00:59:16.000Z
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
# Useful for testing which interpreter the pex machinery selects.
v = sys.version_info
print('%d.%d.%d' % v[0:3])
def say_hello():
# NOTE: Do not change this text without changing tests that check for it.
print('echo_interpreter_version loaded successfully.')
| 26.894737
| 82
| 0.765166
|
cba291930ee534f412039ba07785faee300a5912
| 98
|
py
|
Python
|
GipProjectClone/adddd.py
|
Mushrume/AppPython-GipGame
|
e92a7f1c9d44f302cbbf8d79f3ab8cdf297d6981
|
[
"MIT"
] | null | null | null |
GipProjectClone/adddd.py
|
Mushrume/AppPython-GipGame
|
e92a7f1c9d44f302cbbf8d79f3ab8cdf297d6981
|
[
"MIT"
] | 6
|
2021-10-09T03:40:30.000Z
|
2021-11-26T02:25:39.000Z
|
GipProjectClone/adddd.py
|
Mushrume/AppPython-GipGame
|
e92a7f1c9d44f302cbbf8d79f3ab8cdf297d6981
|
[
"MIT"
] | 4
|
2021-09-08T10:51:47.000Z
|
2021-11-23T19:20:47.000Z
|
from rich.traceback import install
install()
def add(x, y):
return x+y
add(1, "a")
| 10.888889
| 35
| 0.591837
|
c5dbec713bc194a6916e548695dd8b86bc98a04d
| 3,317
|
py
|
Python
|
tests/utils/mock_server.py
|
ant-lastline/cb-lastline-connector
|
ccdf2c7e33dc94acb382fb4694b9a65b44142b26
|
[
"MIT"
] | 2
|
2017-01-23T22:09:23.000Z
|
2018-07-16T04:37:41.000Z
|
tests/utils/mock_server.py
|
ant-lastline/cb-lastline-connector
|
ccdf2c7e33dc94acb382fb4694b9a65b44142b26
|
[
"MIT"
] | null | null | null |
tests/utils/mock_server.py
|
ant-lastline/cb-lastline-connector
|
ccdf2c7e33dc94acb382fb4694b9a65b44142b26
|
[
"MIT"
] | 1
|
2016-08-16T23:45:29.000Z
|
2016-08-16T23:45:29.000Z
|
import logging
import os
try:
import simplejson as json
except ImportError:
import json
from flask import Flask, request, make_response, Response
from cStringIO import StringIO
import zipfile
def get_mocked_server(binary_directory):
mocked_cb_server = Flask('cb')
files = os.listdir(binary_directory)
@mocked_cb_server.route('/api/v1/binary', methods=['GET', 'POST'])
def binary_search_endpoint():
if request.method == 'GET':
query_string = request.args.get('q', '')
rows = int(request.args.get('rows', 10))
start = int(request.args.get('start', 0))
elif request.method == 'POST':
parsed_data = json.loads(request.data)
if 'q' in parsed_data:
query_string = parsed_data['q']
else:
query_string = ''
if 'rows' in parsed_data:
rows = int(parsed_data['rows'])
else:
rows = 10
if 'start' in parsed_data:
start = int(parsed_data['start'])
else:
start = 0
else:
return make_response('Invalid Request', 500)
return Response(response=json.dumps(binary_search(query_string, rows, start)),
mimetype='application/json')
def binary_search(q, rows, start):
return {
'results':
[json.load(open(os.path.join(binary_directory, fn), 'r')) for fn in files[start:start+rows]],
'terms': '',
'total_results': len(files),
'start': start,
'elapsed': 0.1,
'highlights': [],
'facets': {}
}
@mocked_cb_server.route('/api/v1/binary/<md5sum>/summary')
def get_binary_summary(md5sum):
filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
if not os.path.exists(filepath):
return Response("File not found", 404)
binary_data = open(filepath, 'r').read()
return Response(response=binary_data, mimetype='application/json')
@mocked_cb_server.route('/api/v1/binary/<md5sum>')
def get_binary(md5sum):
metadata_filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
content_filepath = os.path.join(binary_directory, '%s' % md5sum.lower())
for filepath in [metadata_filepath, content_filepath]:
if not os.path.exists(filepath):
return Response("File not found", 404)
zipfile_contents = StringIO()
zf = zipfile.ZipFile(zipfile_contents, 'w', zipfile.ZIP_DEFLATED, False)
zf.writestr('filedata', open(content_filepath, 'r').read())
zf.writestr('metadata', open(metadata_filepath, 'r').read())
zf.close()
return Response(response=zipfile_contents.getvalue(), mimetype='application/zip')
@mocked_cb_server.route('/api/info')
def info():
return Response(response=json.dumps({"version": "5.1.0"}), mimetype='application/json')
return mocked_cb_server
if __name__ == '__main__':
mydir = os.path.dirname(os.path.abspath(__file__))
binaries_dir = os.path.join(mydir, '..', 'data', 'binary_data')
mock_server = get_mocked_server(binaries_dir)
mock_server.run('127.0.0.1', 7982, debug=True)
| 34.195876
| 109
| 0.601447
|
ac425205153af7646fa62983e5e337dc51af899e
| 6,039
|
py
|
Python
|
MACD.py
|
YashSheth1/Algo-Trading
|
0ef8f55978ed93af1eb69f1fb9aedd6002f7ea51
|
[
"MIT"
] | null | null | null |
MACD.py
|
YashSheth1/Algo-Trading
|
0ef8f55978ed93af1eb69f1fb9aedd6002f7ea51
|
[
"MIT"
] | null | null | null |
MACD.py
|
YashSheth1/Algo-Trading
|
0ef8f55978ed93af1eb69f1fb9aedd6002f7ea51
|
[
"MIT"
] | null | null | null |
from get_agg import get_agg
#import sys
"""
---MACD---
1. Definitions
1.1 MA
1.2 MAcalculation
1.3 showdata
1.4 showEdata(Exponential)
---Flow Of CODE---
2.1 Configurations are received from the trading script
2.2 Get aggregated candles
2.3 Call MA() to calculate MA1, MA2, MACD, SIGNAL, EMA1, EMA2, E-MACD, E-SIGNAL
2.4 Return a list of objects of the MACD class
"""
class MACD():
def __init__(self,period=None,ma1=None,ma2=None,sp=None,macd1=None,ema1=None,ema2=None,emacd1=None,esp=None):
self.period=period
#For SMA's
self.ma1=ma1
self.ma2=ma2
self.sp=sp
self.macd1=macd1
#for EMA's
self.ema1=ema1
self.ema2=ema2
self.esp=esp
self.emacd1=emacd1
def MA(self,period_data,fast_period,slow_period,signal_period):
counter_for_ma=0
counter_for_macd_list=0
#constant for multiplying in macd formula
k1=2.0/(fast_period+1)
k2=2.0/(slow_period+1)
k3=2.0/(signal_period+1)
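#for example, with the common 12/26/9 configuration the smoothing constants
#come out to k1 = 2/13 ~= 0.1538, k2 = 2/27 ~= 0.0741 and k3 = 2/10 = 0.2
#(illustrative periods; the actual values are whatever the caller passes in)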
sp=0.0
ma1=0.0
ma2=0.0
ema1=0.0
ema2=0.0
ema_calculation_signal=0
ema2_calculation_signal=0
esp2_calculation_signal=0
ma2_calculation_list=[0.0]*(slow_period)
ma_calculation_list=[0.0]*(fast_period)
Calculated_answer=[]
tmacd=0.0
temacd=0
time_end=''
macd=[0.0]*(signal_period)
emacd=[0.0]*(signal_period)
counter=0
counter1=0
counter3_esp=0
counter_for_esp_calculaton=0
for current_item in period_data:
current_close_price=current_item.close
#keep each list at the size of its window so extra memory is not used
ma_calculation_list[counter_for_ma%(fast_period)]=current_close_price
ma2_calculation_list[counter_for_ma%(slow_period)]=current_close_price
"""-----------------Simple Moving Average-------------------"""
#------------------------------------------------MA1--------------------------------
if counter_for_ma>=(fast_period-1):
ma1=sum(ma_calculation_list)/fast_period
ema_calculation_signal=1
else:
ma1=0
#-------------------------------------------MA2-------------------------------
if counter_for_ma>=(slow_period-1):
ma2=sum(ma2_calculation_list)/slow_period
tmacd=ma1-ma2
time_end=str(current_item.time_end)
macd[counter_for_macd_list%(signal_period)]=tmacd
ema2_calculation_signal=1
counter_for_macd_list=counter_for_macd_list+1
else:
ma2=0
#---------------------------------------Signal--------------------------
if counter_for_macd_list>=(signal_period-1) and ema2_calculation_signal==1:
sp=sum(macd)/signal_period
else:
sp=0
"""-----------------Exponential Moving Average-------------------"""
#----------------------------------------EMA1---------------------------
if ema_calculation_signal==1:
#for EMA1
if counter==0:
ema1=ma1
else:
ema1=ema1+k1*(current_close_price-ema1)
counter+=1
else:
ema1=0
#--------------------------------------EMA2-----------------------------
if ema2_calculation_signal==1:
#for EMA2
if counter1==0:
ema2=ma2
else:
ema2=ema2+k2*(current_close_price-ema2)
counter1+=1
temacd=ema1-ema2
emacd[counter_for_esp_calculaton%signal_period]=temacd
esp2_calculation_signal=1
counter_for_esp_calculaton=counter_for_esp_calculaton+1
else:
ema2=0
#----------------------------------E-Signal----------------------------
if esp2_calculation_signal==1 and counter_for_esp_calculaton>=signal_period:
if counter3_esp==0:
esp=temacd
else:
esp=esp+k3*(temacd-esp)  # smooth the exponential MACD (temacd), matching the initialisation above
counter3_esp+=1
else:
esp=0
#-----------------------------final ANSWER-----------------------------
counter_for_ma=counter_for_ma+1
#print ema1,ema2,tmacd,temacd
MACD_class_object=MACD(current_item,ma1,ma2,sp,tmacd,ema1,ema2,temacd,esp)
Calculated_answer.append(MACD_class_object)
return Calculated_answer
def showdata(self):
#self.period.show()
print"____________________________"
print 'MA1'
print self.ma1
print "MA2"
print self.ma2
print "MACD"
print self.macd1
print "Signal Value"
print self.sp
print "+++++++++++++++++++++++++++"
def showEdata(self):
#self.period.show()
print"____________________________"
print 'EMA1'
print self.ema1
print "EMA2"
print self.ema2
print "EMACD"
print self.emacd1
print "E ignal Value"
print self.esp
print "+++++++++++++++++++++++++++"
def MAcalculation(self,data,datatype,date1,date2,pro,kname,session):
period_data=get_agg(data[0],datatype,date1,date2,pro,kname,session)
Calculated_MA=self.MA(period_data,data[1],data[2],data[3])
return Calculated_MA
| 34.508571
| 114
| 0.472264
|
6573f218a2e6ca430c21d411802c39aa707fbd99
| 12,535
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/chardet/langgreekmodel.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_vendor/chardet/langgreekmodel.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_vendor/chardet/langgreekmodel.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020
# Author: xiaoweixiang
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
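# For example, reading the table above: byte 0x41 ('A') maps to order 82,
# bytes 0x30-0x39 ('0'-'9') map to 252, and most bytes below 0x20 map to 255
# (control characters), with CR/LF mapped to 254.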
win1253_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'char_to_order_map': Latin7_char_to_order_map,
'precedence_matrix': GreekLangModel,
'typical_positive_ratio': 0.982851,
'keep_english_letter': False,
'charset_name': "ISO-8859-7",
'language': 'Greek',
}
Win1253GreekModel = {
'char_to_order_map': win1253_char_to_order_map,
'precedence_matrix': GreekLangModel,
'typical_positive_ratio': 0.982851,
'keep_english_letter': False,
'charset_name': "windows-1253",
'language': 'Greek',
}
| 55.711111
| 70
| 0.546869
|
22427aa2238d56acd1ef5dd405848a28afe8b769
| 247,346
|
py
|
Python
|
json_to_relation/edxTrackLogJSONParser.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | 4
|
2015-10-10T19:09:49.000Z
|
2021-09-02T00:58:06.000Z
|
json_to_relation/edxTrackLogJSONParser.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | null | null | null |
json_to_relation/edxTrackLogJSONParser.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | 8
|
2015-05-16T14:33:33.000Z
|
2019-10-24T08:56:25.000Z
|
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Oct 2, 2013
:author: paepcke
Modifications:
* Jun 24, 2018: In findHashPattern(): deal with idStr not being a str.
* May 14, 2018: For selected events, use REGEXP to recognize.
* Dec 19, 2014: Fixed treatment of AB experiment related events.
* Oct 22, 2014: Added quarter column to EdxTrackEvent in support of horizontal partitioning.
* Jun 23, 2014: Added support for AB Experiment events
* Dec 28, 2013: Col load_info_fk in EdxTrackEvent now properly typed to varchar(40) to match LoadInfo.load_info_id's type.
* Dec 28, 2013: Uses REPLACE INTO, rather than INSERT INTO.
* Dec 28, 2013: Fixed some epydoc comment format errors.
* Dec 29, 2013: Caused the ALTER ENABLE KEYS section in the output .sql file
to be commented out. When loading multiple .sql files in a row,
these statements caused re-creation of indexes after each load,
adding significantly to the load time. Instead, manageEdxDb.py
handles the re-enabling.
In pushDBCreations() pushed a header comment into the output .sql
file to warn about the commented-out ALTER ENABLE KEYS. The comment
advises to uncomment if loading manually, i.e. not via
manageEdxDb.py.
'''
from collections import OrderedDict
import datetime
import hashlib
import json
import os
import re
import string
from unidecode import unidecode
import uuid
from col_data_type import ColDataType
from generic_json_parser import GenericJSONParser
from locationManager import LocationManager
from modulestoreImporter import ModulestoreImporter
from output_disposition import ColumnSpec
from ipToCountry import IpCountryDict
class AssessmentOptionSource():
LEARNER = 0,
INSTRUCTOR = 1
EDX_HEARTBEAT_PERIOD = 360 # seconds
class EdXTrackLogJSONParser(GenericJSONParser):
'''
Parser specialized for EdX track logs.
'''
# Class var to detect JSON strings that contain backslashes
# in front of chars other than \bfnrtu/. JSON allows backslashes
# only before those. But: /b, /f, /n, /r, /t, /u, and \\ also need to
# be escaped.
# Pattern used in makeSafeJSON()
#JSON_BAD_BACKSLASH_PATTERN = re.compile(r'\\([^\\bfnrtu/])')
JSON_BAD_BACKSLASH_PATTERN = re.compile(r'\\([^/"])')
# Regex patterns for extracting fields from bad JSON:
searchPatternDict = {}
searchPatternDict['username'] = re.compile(r"""
username[^:]* # The screen_name key
[^"']* # up to opening quote of the value
["'] # opening quote of the value
([^"']*) # the value
""", re.VERBOSE)
searchPatternDict['host'] = re.compile(r"""
host[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['session'] = re.compile(r"""
session[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['event_source'] = re.compile(r"""
event_source[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['event_type'] = re.compile(r"""
event_type[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['time'] = re.compile(r"""
time[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['ip'] = re.compile(r"""
ip[^:]*
[^"']*
["']
([^"']*)
""", re.VERBOSE)
searchPatternDict['event'] = re.compile(r"""
[\\"']event[\\"'] # Event with possibly backslashed quotes
[^:]* # up to the colon
: # colon that separates key and value
(.*) # all of the rest of the string.
""", re.VERBOSE)
# Picking (likely) zip codes out of a string:
zipCodePattern = re.compile(r'[^0-9]([0-9]{5})')
hexGE32Digits = re.compile(r'[a-fA-F0-9]{32,}')
# Finding the word 'status' in problem_graded events:
# Extract problem ID and 'correct' or 'incorrect' from
# a messy problem_graded event string. Two cases:
# ' aria-describedby=\\"input_i4x-Medicine-SciWrite-problem-c3266c76a7854d02b881250a49054ddb_2_1\\">\\n incorrect\\n </p>\\n\\n'
# and
# 'aria-describedby=\\"input_i4x-Medicine-HRP258-problem-068a71cb1a1a4da39da093da2778f000_3_1_choice_2\\">Status: incorrect</span>'
# with lots of HTML and other junk around it.
problemGradedComplexPattern = re.compile(r'aria-describedby=[\\"]*(input[^\\">]*)[\\"]*[>nStatus\\:\s"]*([iIn]{0,2}correct)')
# isolate '-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1":' from:
# ' {"success": "correct", "correct_map": {"i4x-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1": {"hint": "", "mode": null'
problemXFindCourseID = re.compile(r'[^-]*([^:]*)')
# Isolate 32-bit hash inside any string, e.g.:
# i4x-Medicine-HRP258-videoalpha-7cd4bf0813904612bcd583a73ade1d54
# or:
# input_i4x-Medicine-HRP258-problem-98ca37dbf24849debcc29eb36811cb68_3_1_choice_3'
findHashPattern = re.compile(r'([a-f0-9]{32})')
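# For example (using the second string from the comment above):
#   EdXTrackLogJSONParser.findHashPattern.search(
#       'input_i4x-Medicine-HRP258-problem-98ca37dbf24849debcc29eb36811cb68_3_1_choice_3').group(1)
#   returns '98ca37dbf24849debcc29eb36811cb68'.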
def __init__(self,
jsonToRelationConverter,
mainTableName,
logfileID='',
progressEvery=1000,
replaceTables=False,
dbName='test',
useDisplayNameCache=False):
'''
Constructor
:param jsonToRelationConverter: JSONToRelation instance
:type jsonToRelationConverter: JSONToRelation
:param mainTableName: name wanted for the table into which the bulk of event data is placed
:type mainTableName: String
:param logfileID: an identifier of the tracking log file being processed. Used
to build error/warning msgs that cite a file and line number in
their text
:type logfileID: String
:param progressEvery: number of input lines, a.k.a. JSON objects after which logging should report total done
:type progressEvery: int
:param replaceTables: determines whether the tables that constitute EdX track logs are to be deleted before inserting entries. Default: False
:type replaceTables: bool
:param dbName: database name into which tables will be created (if replaceTables is True), and into which insertions will take place.
:type dbName: String
:param useDisplayNameCache: if True then an existing cache for mapping
OpenEdx hashes to human-readable display names is used. Else
the required information is read and parsed from a JSON file name
that contains the needed information from modulestore. See
modulestoreImporter.py for details.
:type useDisplayNameCache: Bool
'''
super(EdXTrackLogJSONParser, self).__init__(jsonToRelationConverter,
logfileID=logfileID,
progressEvery=progressEvery
)
self.mainTableName = mainTableName
self.dbName = dbName
self.setupMySqlDumpControlInstructions()
# Prepare as much as possible outside parsing of
# each line; Build the schema:
# Fields common to every request:
self.commonFldNames = ['agent',
'event_source',
'event_type',
'ip',
'page',
'session',
'time',
'username',
'course_id',
'course_display_name',
'context'
]
# A Country abbreviation lookup facility:
self.countryChecker = LocationManager()
# An ip-country lookup facility:
self.ipCountryDict = IpCountryDict()
# Lookup table from OpenEdx 32-bit hash values to
# corresponding problem, course, or video display_names.
# This call can cause a portion of the modulestore to be
# pulled from S3, which may cause exceptions. Those
# are caught and logged by the caller:
self.hashMapper = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'data/modulestore_latest.json'),
useCache=useDisplayNameCache,
parent=self)
# Make a list of all short course names
# sorted by length in decreasing order.
# This list is used by extractCanonicalCourseName()
# to pull the most likely course name from a nasty
# string that has a course name embedded:
self.courseNamesSorted = sorted(self.hashMapper.keys(), key=len, reverse=True)
self.schemaHintsMainTable = OrderedDict()
self.schemaHintsMainTable['_id'] = ColDataType.UUID
self.schemaHintsMainTable['event_id'] = ColDataType.UUID # we generate this one ourselves; xlates to VARCHAR(40). Not unique!
self.schemaHintsMainTable['agent'] = ColDataType.TEXT
self.schemaHintsMainTable['event_source'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['event_type'] = ColDataType.TEXT
self.schemaHintsMainTable['ip_country'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['page'] = ColDataType.TEXT
self.schemaHintsMainTable['session'] = ColDataType.TEXT
self.schemaHintsMainTable['time'] = ColDataType.DATETIME
self.schemaHintsMainTable['quarter'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['anon_screen_name'] = ColDataType.TEXT
self.schemaHintsMainTable['downtime_for'] = ColDataType.DATETIME
# Students
self.schemaHintsMainTable['student_id'] = ColDataType.TEXT
# Instructors:
self.schemaHintsMainTable['instructor_id'] = ColDataType.TEXT
# Courses
self.schemaHintsMainTable['course_id'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['course_display_name'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['resource_display_name'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['organization'] = ColDataType.TINYTEXT
# Sequence navigation:
self.schemaHintsMainTable['sequence_id'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['goto_from'] = ColDataType.INT
self.schemaHintsMainTable['goto_dest'] = ColDataType.INT
# Problems:
self.schemaHintsMainTable['problem_id'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['problem_choice'] = ColDataType.TEXT
self.schemaHintsMainTable['question_location'] = ColDataType.TEXT
# Submissions:
self.schemaHintsMainTable['submission_id'] = ColDataType.TEXT
# Attempts:
self.schemaHintsMainTable['attempts'] = ColDataType.INT
# Answers
# Multiple choice answers are in their own table,
# called Answer. In this main table answerFK points
# to one entry in that table.
self.schemaHintsMainTable['long_answer'] = ColDataType.TEXT # essay answers
self.schemaHintsMainTable['student_file'] = ColDataType.TEXT
self.schemaHintsMainTable['can_upload_file'] = ColDataType.TINYTEXT
# Feedback
self.schemaHintsMainTable['feedback'] = ColDataType.TEXT
self.schemaHintsMainTable['feedback_response_selected'] = ColDataType.TINYINT
# Transcript
self.schemaHintsMainTable['transcript_id'] = ColDataType.TEXT
self.schemaHintsMainTable['transcript_code'] = ColDataType.TINYTEXT
# Rubrics:
self.schemaHintsMainTable['rubric_selection'] = ColDataType.INT
self.schemaHintsMainTable['rubric_category'] = ColDataType.INT
# Video:
self.schemaHintsMainTable['video_id'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_code'] = ColDataType.TEXT
self.schemaHintsMainTable['video_current_time'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_speed'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_old_time'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_new_time'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_seek_type'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_new_speed'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['video_old_speed'] = ColDataType.TINYTEXT
# Book (PDF) reading:
self.schemaHintsMainTable['book_interaction_type'] = ColDataType.TINYTEXT
# problem_check:
self.schemaHintsMainTable['success'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['answer_id'] = ColDataType.TEXT
self.schemaHintsMainTable['hint'] = ColDataType.TEXT
self.schemaHintsMainTable['mode'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['msg'] = ColDataType.TEXT
self.schemaHintsMainTable['npoints'] = ColDataType.TINYINT
self.schemaHintsMainTable['queuestate'] = ColDataType.TEXT
# Used in problem_rescore:
self.schemaHintsMainTable['orig_score'] = ColDataType.INT
self.schemaHintsMainTable['new_score'] = ColDataType.INT
self.schemaHintsMainTable['orig_total'] = ColDataType.INT
self.schemaHintsMainTable['new_total'] = ColDataType.INT
# For user group manipulations:
self.schemaHintsMainTable['event_name'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['group_user'] = ColDataType.TINYTEXT
self.schemaHintsMainTable['group_action'] = ColDataType.TINYTEXT #'add', 'remove'; called 'event' in JSON
# ajax
self.schemaHintsMainTable['position'] = ColDataType.INT # used in event ajax goto_position
# When bad JSON is encountered, it gets put
# into the following field:
self.schemaHintsMainTable['badly_formatted'] = ColDataType.TEXT
# Foreign keys to auxiliary tables:
self.schemaHintsMainTable['correctMap_fk'] = ColDataType.UUID
self.schemaHintsMainTable['answer_fk'] = ColDataType.UUID
self.schemaHintsMainTable['state_fk'] = ColDataType.UUID
self.schemaHintsMainTable['load_info_fk'] = ColDataType.UUID
# Schema hints need to be a dict that maps column names to ColumnSpec
# instances. The dict we built so far only has the column types. Go through
# and turn the dict's values into ColumnSpec instances:
for colName in self.schemaHintsMainTable.keys():
colType = self.schemaHintsMainTable[colName]
self.schemaHintsMainTable[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Establish the schema for the main table:
self.jsonToRelationConverter.setSchemaHints(self.schemaHintsMainTable)
# Schema for State table:
self.schemaStateTbl = OrderedDict()
self.schemaStateTbl['state_id'] = ColDataType.UUID
self.schemaStateTbl['seed'] = ColDataType.TINYINT
self.schemaStateTbl['done'] = ColDataType.TINYTEXT
self.schemaStateTbl['problem_id'] = ColDataType.TINYTEXT
self.schemaStateTbl['student_answer'] = ColDataType.UUID
self.schemaStateTbl['correct_map'] = ColDataType.UUID
self.schemaStateTbl['input_state'] = ColDataType.UUID
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaStateTbl.keys():
colType = self.schemaStateTbl[colName]
self.schemaStateTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for Answer table:
self.schemaAnswerTbl = OrderedDict()
self.schemaAnswerTbl['answer_id'] = ColDataType.UUID
self.schemaAnswerTbl['problem_id'] = ColDataType.TINYTEXT
self.schemaAnswerTbl['answer'] = ColDataType.TEXT
self.schemaAnswerTbl['course_id'] = ColDataType.TINYTEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaAnswerTbl.keys():
colType = self.schemaAnswerTbl[colName]
self.schemaAnswerTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for CorrectMap table:
self.schemaCorrectMapTbl = OrderedDict()
self.schemaCorrectMapTbl['correct_map_id'] = ColDataType.UUID
self.schemaCorrectMapTbl['answer_identifier'] = ColDataType.TEXT
self.schemaCorrectMapTbl['correctness'] = ColDataType.TINYTEXT
self.schemaCorrectMapTbl['npoints'] = ColDataType.INT
self.schemaCorrectMapTbl['msg'] = ColDataType.TEXT
self.schemaCorrectMapTbl['hint'] = ColDataType.TEXT
self.schemaCorrectMapTbl['hintmode'] = ColDataType.TINYTEXT
self.schemaCorrectMapTbl['queuestate'] = ColDataType.TEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaCorrectMapTbl.keys():
colType = self.schemaCorrectMapTbl[colName]
self.schemaCorrectMapTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for InputState table:
self.schemaInputStateTbl = OrderedDict()
self.schemaInputStateTbl['input_state_id'] = ColDataType.UUID
self.schemaInputStateTbl['problem_id'] = ColDataType.TINYTEXT
self.schemaInputStateTbl['state'] = ColDataType.TEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaInputStateTbl.keys():
colType = self.schemaInputStateTbl[colName]
self.schemaInputStateTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for EventIp table:
self.schemaEventIpTbl = OrderedDict()
self.schemaEventIpTbl['event_table_id'] = ColDataType.UUID
self.schemaEventIpTbl['event_ip'] = ColDataType.TINYTEXT # IP address of respective event
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaEventIpTbl.keys():
colType = self.schemaEventIpTbl[colName]
self.schemaEventIpTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for AB Experiment Events table:
self.schemaABExperimentTbl = OrderedDict()
self.schemaABExperimentTbl['event_table_id'] = ColDataType.UUID
self.schemaABExperimentTbl['event_type'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['anon_screen_name'] = ColDataType.UUID
self.schemaABExperimentTbl['group_id'] = ColDataType.INT
self.schemaABExperimentTbl['group_name'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['partition_id'] = ColDataType.INT
self.schemaABExperimentTbl['partition_name'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['child_module_id'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['resource_display_name'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['cohort_id'] = ColDataType.INT
self.schemaABExperimentTbl['cohort_name'] = ColDataType.TINYTEXT
self.schemaABExperimentTbl['course_display_name'] = ColDataType.TINYTEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaABExperimentTbl.keys():
colType = self.schemaABExperimentTbl[colName]
self.schemaABExperimentTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for OpenAssessment Events table:
self.schemaOpenAssessmentTbl = OrderedDict()
self.schemaOpenAssessmentTbl['event_table_id'] = ColDataType.UUID
self.schemaOpenAssessmentTbl['event_type'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['anon_screen_name'] = ColDataType.UUID
self.schemaOpenAssessmentTbl['score_type'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['submission_uuid'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['edx_anon_id'] = ColDataType.TEXT
self.schemaOpenAssessmentTbl['time'] = ColDataType.DATETIME
self.schemaOpenAssessmentTbl['time_aux'] = ColDataType.DATETIME
self.schemaOpenAssessmentTbl['course_display_name'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['resource_display_name'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['resource_id'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['submission_text'] = ColDataType.MEDIUMTEXT
self.schemaOpenAssessmentTbl['feedback_text'] = ColDataType.MEDIUMTEXT
self.schemaOpenAssessmentTbl['comment_text'] = ColDataType.MEDIUMTEXT
self.schemaOpenAssessmentTbl['attempt_num'] = ColDataType.INT
self.schemaOpenAssessmentTbl['options'] = ColDataType.TINYTEXT
self.schemaOpenAssessmentTbl['corrections'] = ColDataType.TEXT
self.schemaOpenAssessmentTbl['points'] = ColDataType.TEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaOpenAssessmentTbl.keys():
colType = self.schemaOpenAssessmentTbl[colName]
self.schemaOpenAssessmentTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for Account table:
self.schemaAccountTbl = OrderedDict()
self.schemaAccountTbl['account_id'] = ColDataType.UUID
self.schemaAccountTbl['screen_name'] = ColDataType.TEXT # chosen screen name
self.schemaAccountTbl['name'] = ColDataType.TEXT # actual name
self.schemaAccountTbl['anon_screen_name'] = ColDataType.TEXT
self.schemaAccountTbl['mailing_address'] = ColDataType.TEXT
self.schemaAccountTbl['zipcode'] = ColDataType.TINYTEXT # Picked out from mailing_address
self.schemaAccountTbl['country'] = ColDataType.TINYTEXT # Picked out from mailing_address
self.schemaAccountTbl['gender'] = ColDataType.TINYTEXT
self.schemaAccountTbl['year_of_birth'] = ColDataType.INT
self.schemaAccountTbl['level_of_education'] = ColDataType.TINYTEXT
self.schemaAccountTbl['goals'] = ColDataType.TEXT
self.schemaAccountTbl['honor_code'] = ColDataType.TINYINT
self.schemaAccountTbl['terms_of_service'] = ColDataType.TINYINT
self.schemaAccountTbl['course_id'] = ColDataType.TEXT
self.schemaAccountTbl['enrollment_action'] = ColDataType.TINYTEXT
self.schemaAccountTbl['email'] = ColDataType.TEXT
self.schemaAccountTbl['receive_emails'] = ColDataType.TINYTEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaAccountTbl.keys():
colType = self.schemaAccountTbl[colName]
self.schemaAccountTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Schema for LoadInfo table:
self.schemaLoadInfoTbl = OrderedDict()
self.schemaLoadInfoTbl['load_info_id'] = ColDataType.UUID
self.schemaLoadInfoTbl['load_date_time'] = ColDataType.DATETIME
self.schemaLoadInfoTbl['load_file'] = ColDataType.TEXT
# Turn the SQL data types in the dict to column spec objects:
for colName in self.schemaLoadInfoTbl.keys():
colType = self.schemaLoadInfoTbl[colName]
self.schemaLoadInfoTbl[colName] = ColumnSpec(colName, colType, self.jsonToRelationConverter)
# Dict<IP,Datetime>: record each IP's most recent
# activity timestamp (heartbeat or any other event).
# Used to detect server downtimes:
self.downtimes = {}
# Place to keep history for some rows, for which we want
# to compute some on-the-fly aggregations:
self.resultDict = {}
# Create databases if needed:
self.pushDBCreations()
# Create main and auxiliary tables if appropriate:
self.pushTableCreations(replaceTables)
# Add an entry to the load_info table to reflect this
# load file and start of load:
loadInfoDict = OrderedDict()
loadInfoDict['load_info_id'] = None # filled in by pushLoadInfo()
loadInfoDict['load_date_time'] = self.jsonToRelationConverter.loadDateTime
loadInfoDict['load_file'] = self.jsonToRelationConverter.loadFile
self.currLoadInfoFK = self.pushLoadInfo(loadInfoDict)
self.currContext = None
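# A minimal usage sketch (hedged): 'converter' stands for an already
# constructed JSONToRelation instance, and the logfileID value is
# hypothetical. The converter drives the parser by handing it one JSON
# line at a time via processOneJSONObject():
#
#   parser = EdXTrackLogJSONParser(converter,
#                                  'EdxTrackEvent',
#                                  logfileID='tracking.log-20130801',
#                                  dbName='Edx',
#                                  useDisplayNameCache=True)
#   # converter subsequently calls parser.processOneJSONObject(jsonLine, row)
#   # for each tracking log line.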
def setupMySqlDumpControlInstructions(self):
# Preamble for MySQL dumps to make loads fast:
self.dumpPreamble = "/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n" +\
"/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n" +\
"/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n" +\
"/*!40101 SET NAMES utf8 */;\n" +\
"/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n" +\
"/*!40103 SET TIME_ZONE='+00:00' */;\n" +\
"/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n" +\
"/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n" +\
"/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;\n" +\
"/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n"
# Preamble to table creation:
self.dumpTableCreationPreamble = "/*!40101 SET @saved_cs_client = @@character_set_client */;\n" +\
"/*!40101 SET character_set_client = utf8 */;\n"
# Construct the SQL statement that precedes INSERT statements:
self.dumpInsertPreamble = "LOCK TABLES `%s` WRITE, `State` WRITE, `InputState` WRITE, `Answer` WRITE, `CorrectMap` WRITE, `LoadInfo` WRITE, `Account` WRITE, `EventIp` WRITE, `ABExperiment` WRITE, `OpenAssessment` WRITE;\n" % self.mainTableName +\
"/*!40000 ALTER TABLE `%s` DISABLE KEYS */;\n" % self.mainTableName +\
"/*!40000 ALTER TABLE `State` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `InputState` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `Answer` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `CorrectMap` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `LoadInfo` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `Account` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `EventIp` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `ABExperiment` DISABLE KEYS */;\n" +\
"/*!40000 ALTER TABLE `OpenAssessment` DISABLE KEYS */;\n"
# Add commented-out instructions for re-enabling keys.
# The ENABLE KEYS instructions are therefore disabled in
# the transform result .sql files. They need to be
# executed *somewhere*. If the .sql files are loaded
# from the CLI, the statements in the .sql files should
# be uncommented (remove just the '-- '!). Normally,
# the manageEdxDb.py script will do the re-enabling.
self.dumpPostscript1 = "-- /*!40000 ALTER TABLE `%s` ENABLE KEYS */;\n" % self.mainTableName +\
"-- /*!40000 ALTER TABLE `State` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `InputState` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `Answer` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `CorrectMap` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `LoadInfo` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `Account` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `EventIp` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `ABExperiment` ENABLE KEYS */;\n" +\
"-- /*!40000 ALTER TABLE `OpenAssessment` ENABLE KEYS */;\n" +\
"UNLOCK TABLES;\n"
# In between Postscript1 and Postscript2 goes the copy of the Account and EventIp tables to the private db, and their drop in Edx:
self.dumpPostscript2 = "/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n" +\
"/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n" +\
"/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n" +\
"/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n" +\
"/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n" +\
"/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n" +\
"/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" +\
"/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n"
def processOneJSONObject(self, jsonStr, row):
'''
This method is the main dispatch for track log event_types.
It's a long method, and should be partitioned. First, bookkeeping
fields are filled in that are common to all events, such as the
user agent, and the reference into the LoadInfo table that shows
on which date this row was loaded. Then a long 'case' statement
calls handler methods depending on the incoming track log's event_type.
Given one line from the EdX Track log, produce one row
of relational output. Return is an array of values, the
same that is passed in. On the way, the partner JSONToRelation
object is called to ensure that JSON fields for which new columns
have not been created yet receive a place in the row array.
Different types of JSON records will be passed: server heartbeats,
dashboard accesses, account creations, user logins. Example record
for the latter::
{"username": "",
"host": "class.stanford.edu",
"event_source": "server",
"event_type": "/accounts/login",
"time": "2013-06-14T00:31:57.661338",
"ip": "98.230.189.66",
"event": "{
\"POST\": {},
\"GET\": {
\"next\": [\"/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/80160e.../\"]}}",
"agent": "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101
Firefox/21.0",
"page": null}
Two more examples to show the variance in the format. Note "event" field:
Second example::
{"username": "jane",
"host": "class.stanford.edu",
"event_source": "server",
"event_type": "/courses/Education/EDUC115N/How_to_Learn_Math/modx/i4x://Education/EDUC115N/combinedopenended/c415227048464571a99c2c430843a4d6/get_results",
"time": "2013-07-31T06:27:06.222843+00:00",
"ip": "67.166.146.73",
"event": "{\"POST\": {
\"task_number\": [\"0\"]},
\"GET\": {}}",
"agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36",
"page": null
}
Third example::
{"username": "miller",
"host": "class.stanford.edu",
"session": "fa715506e8eccc99fddffc6280328c8b",
"event_source": "browser",
"event_type": "hide_transcript",
"time": "2013-07-31T06:27:10.877992+00:00",
"ip": "27.7.56.215",
"event": "{\"id\":\"i4x-Medicine-HRP258-videoalpha-09839728fc9c48b5b580f17b5b348edd\",
\"code\":\"fQ3-TeuyTOY\",
\"currentTime\":0}",
"agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36",
"page": "https://class.stanford.edu/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/495757ee7b25401599b1ef0495b068e4/6fd116e15ab9436fa70b8c22474b3c17/"
}
:param jsonStr: string of a single, self contained JSON object
:type jsonStr: String
:param row: partially filled array of values. Passed by reference
:type row: List<<any>>
:return: the filled-in row
:rtype: [<any>]
'''
# No error has occurred yet in processing this JSON str:
self.errorOccurred = False
# self.jsonToRelationConverter.bumpLineCounter() #NOTE: counter bump happens already in j2r
try:
# Turn top level JSON object to dict:
try:
record = json.loads(str(jsonStr))
except ValueError as e:
# Try it again after cleaning up the JSON
# We don't do the cleanup routinely to save
# time.
try:
cleanJsonStr = self.makeJSONSafe(jsonStr)
record = json.loads(cleanJsonStr)
except ValueError as e:
# Pull out what we can, and place in 'badly_formatted' column
self.rescueBadJSON(jsonStr, row=row)
raise ValueError('Ill-formed JSON: %s' % repr(e))
# Dispense with the fields common to all events, except event,
# which is a nested JSON string. Results will be
# in self.resultDict:
self.handleCommonFields(record, row)
# If the event was fully handled in
# handleCommonFields(), then we're done:
if self.finishedRow:
return
# Now handle the different types of events:
try:
eventType = record['event_type']
except KeyError:
# New-type event, in which the event_type is a field
# called 'name' within the 'event' field:
try:
event = record['event']
except KeyError:
# No event record at all:
raise KeyError("No event field")
event = self.ensureDict(event)
if event is None:
raise KeyError("No properly formatted event field")
eventType = event.get('name', None)
if eventType is None:
raise KeyError("No event type field; cannot determine tye type of event.")
# Check whether we had a server downtime:
try:
ip = record['ip']
except KeyError:
raise ValueError("No server IP.")
eventDateTime = self.getEventTimeFromLogRecord(record)
try:
doRecordHeartbeat = False
recentSignOfLife = self.downtimes[ip]
# Get a timedelta obj w/ duration of time
# during which nothing was heard from server:
serverQuietTime = eventDateTime - recentSignOfLife
if serverQuietTime.seconds > EDX_HEARTBEAT_PERIOD:
self.setValInRow(row, 'downtime_for', str(serverQuietTime))
doRecordHeartbeat = True
# Note new most-recent sign of life from this IP:
self.downtimes[ip] = eventDateTime
except KeyError:
# First sign of life for this IP:
self.downtimes[ip] = eventDateTime
# Record a time of 0 in downtime detection column:
self.setValInRow(row, 'downtime_for', str(datetime.timedelta()))
doRecordHeartbeat = True
if eventType.endswith('/heartbeat'):
# Handled heartbeat above, we don't transfer the heartbeats
# themselves into the relational world. If a server
# downtime was detected from the timestamp of this
# heartbeat, then above code added a respective warning
# into the row, else we just ignore the heartbeat
if not doRecordHeartbeat:
row = []
return
# If eventType is "/" then it was a ping, no more to be done:
if eventType == '/':
row = []
return
elif eventType == 'page_close':
# The page ID was already recorded in the common fields:
return
# For any event other than heartbeat, we need to look
# at the event field, which is an embedded JSON *string*.
# Turn that string into a (nested) Python dict. Though
# *sometimes* the event *is* a dict, not a string, as in
# problem_check_fail:
try:
eventJSONStrOrDict = record['event']
except KeyError:
raise ValueError("Event of type %s has no event field" % eventType)
try:
if '{' not in eventJSONStrOrDict or eventJSONStrOrDict[:5] == 'input':
event = eventJSONStrOrDict # pass along simple case of problem_check/reset/save event (not JSON)
else:
event = json.loads(eventJSONStrOrDict)
except TypeError:
# Was already a dict
event = eventJSONStrOrDict
except Exception as e:
# Try it again after cleaning up the JSON
# We don't do the cleanup routinely to save
# time.
try:
cleanJSONStr = self.makeJSONSafe(eventJSONStrOrDict)
event = json.loads(cleanJSONStr)
except ValueError:
if '{' not in eventJSONStrOrDict:
event = eventJSONStrOrDict # pass along simple case of problem_check/reset/save event
# Last ditch: event types like goto_seq, need backslashes removed:
event = json.loads(re.sub(r'\\','',eventJSONStrOrDict))
except Exception as e1:
row = self.rescueBadJSON(str(record), row=row)
raise ValueError('Bad JSON; saved in col badly_formatted: event_type %s (%s)' % (eventType, repr(e1)))
return
if eventType.endswith('seq_goto') or\
eventType.endswith('seq_next') or\
eventType.endswith('seq_prev'):
row = self.handleSeqNav(record, row, event, eventType)
return
elif eventType == '/accounts/login':
# Already recorded everything needed in common-fields
return
elif eventType == '/login_ajax':
row = self.handleAjaxLogin(record, row, event, eventType)
return
elif eventType.endswith('problem_check'):
# Note: some problem_check cases are also handled in handleAjaxLogin()
row = self.handleProblemCheck(record, row, event)
return
elif eventType.endswith('problem_reset'):
row = self.handleProblemReset(record, row, event)
return
elif eventType.endswith('problem_show'):
row = self.handleProblemShow(record, row, event)
return
elif eventType.endswith('problem_save'):
row = self.handleProblemSave(record, row, event)
return
elif eventType == 'oe_hide_question' or\
eventType == 'oe_hide_problem' or\
eventType == 'peer_grading_hide_question' or\
eventType == 'peer_grading_hide_problem' or\
eventType == 'staff_grading_hide_question' or\
eventType == 'staff_grading_hide_problem' or\
eventType == 'oe_show_question' or\
eventType == 'oe_show_problem' or\
eventType == 'peer_grading_show_question' or\
eventType == 'peer_grading_show_problem' or\
eventType == 'staff_grading_show_question' or\
eventType == 'staff_grading_show_problem':
row = self.handleQuestionProblemHidingShowing(record, row, event)
return
elif eventType == 'rubric_select':
row = self.handleRubricSelect(record, row, event)
return
elif eventType == 'oe_show_full_feedback' or\
eventType == 'oe_show_respond_to_feedback':
row = self.handleOEShowFeedback(record, row, event)
return
elif eventType == 'oe_feedback_response_selected':
row = self.handleOEFeedbackResponseSelected(record, row, event)
return
elif eventType == 'show_transcript' or eventType == 'hide_transcript':
row = self.handleShowHideTranscript(record, row, event)
return
elif eventType.endswith('play_video') or\
eventType.endswith('pause_video') or\
eventType.endswith('stop_video') or\
eventType.endswith('load_video'):
row = self.handleVideoPlayPause(record, row, event)
return
elif eventType.endswith('seek_video'):
row = self.handleVideoSeek(record, row, event)
return
elif eventType.endswith('speed_change_video'):
row = self.handleVideoSpeedChange(record, row, event)
return
elif eventType == 'fullscreen':
row = self.handleFullscreen(record, row, event)
return
elif eventType == 'not_fullscreen':
row = self.handleNotFullscreen(record, row, event)
return
elif eventType == '/dashboard':
# Nothing additional to grab:
return
# The 'startswith textbook.pdf' covers a whole
# family of textbook-related actions, such as
# textbook.pdf.thumbnails.toggled.
elif eventType == 'book' or\
eventType.startswith('textbook.pdf'):
row = self.handleBook(record, row, event)
return
elif eventType == 'showanswer' or eventType == 'show_answer':
row = self.handleShowAnswer(record, row, event)
return
elif eventType == 'problem_check_fail':
self.handleProblemCheckFail(record, row, event)
return
elif eventType == 'problem_rescore_fail':
row = self.handleProblemRescoreFail(record, row, event)
return
elif eventType == 'problem_rescore':
row = self.handleProblemRescore(record, row, event)
return
elif eventType == 'save_problem_fail' or\
eventType == 'save_problem_success' or\
eventType == 'save_problem_check' or\
eventType == 'reset_problem_fail':
row = self.handleSaveProblemFailSuccessCheckOrReset(record, row, event)
return
elif eventType == 'reset_problem':
row = self.handleResetProblem(record, row, event)
return
# Instructor events:
elif eventType in ['list-students', 'dump-grades', 'dump-grades-raw', 'dump-grades-csv',
'dump-grades-csv-raw', 'dump-answer-dist-csv', 'dump-graded-assignments-config',
'list-staff', 'list-instructors', 'list-beta-testers', 'edx.user.settings.changed'
]:
# These events have no additional info. The event_type says it all,
# and that's already been stuck into the table:
return
elif eventType == 'rescore-all-submissions' or eventType == 'reset-all-attempts':
self.handleRescoreReset(record, row, event)
return
elif eventType == 'delete-student-module-state' or eventType == 'rescore-student-submission':
self.handleDeleteStateRescoreSubmission(record, row, event)
return
elif eventType == 'reset-student-attempts':
self.handleResetStudentAttempts(record, row, event)
return
elif eventType == 'get-student-progress-page':
self.handleGetStudentProgressPage(record, row, event)
return
elif eventType == 'add-instructor' or eventType == 'remove-instructor':
self.handleAddRemoveInstructor(record, row, event)
return
elif eventType in ['list-forum-admins', 'list-forum-mods', 'list-forum-community-TAs']:
self.handleListForumMatters(record, row, event)
return
elif eventType in ['remove-forum-admin', 'add-forum-admin', 'remove-forum-mod',
'add-forum-mod', 'remove-forum-community-TA', 'add-forum-community-TA']:
self.handleForumManipulations(record, row, event)
return
elif eventType == 'psychometrics-histogram-generation':
self.handlePsychometricsHistogramGen(record, row, event)
return
elif eventType == 'add-or-remove-user-group':
self.handleAddRemoveUserGroup(record, row, event)
return
elif eventType == '/create_account':
self.handleCreateAccount(record, row, event)
return
elif eventType == 'problem_graded':
# Need to look at return, b/c this
# method handles all its own pushing:
row = self.handleProblemGraded(record, row, event)
return
elif eventType == 'change-email-settings':
self.handleReceiveEmail(record, row, event)
return
# A/B Test Events:
elif eventType in ['assigned_user_to_partition',
'xmodule.partitions.assigned_user_to_partition',
'child_render',
'xblock.split_test.child_render',
'edx.cohort.user_created',
'edx.cohort.user_added',
'edx.cohort.user_removed']:
self.handleABExperimentEvent(record, row, event)
return
# Peer/Self grading (open assessment):
elif eventType in ['openassessmentblock.get_peer_submission',
'openassessmentblock.peer_assess',
'openassessmentblock.self_assess',
'openassessmentblock.submit_feedback_on_assessments',
'openassessmentblock.create_submission',
'openassessmentblock.save_submission',
'openassessmentblock.upload_file',
'openassessmentblock.student_training_assess_example',
'openassessment.student_training_assess_example',
'openassessment.create_submission',
'openassessment.save_submission',
'openassessment.upload_file',
]:
self.handleOpenAssessmentEvent(record, row, event)
return
elif eventType == 'edx.course.enrollment.activated' or eventType == 'edx.course.enrollment.deactivated':
self.handleCourseEnrollActivatedDeactivated(record, row, event)
return
# Forum events:
elif eventType == 'edx.forum.searched':
self.handleForumEvent(record, row, event)
return
# Event type values that start with slash:
elif eventType[0] == '/':
self.handlePathStyledEventTypes(record, row, event)
return
else:
self.logWarn("Unknown event type '%s' in tracklog row %s" % (eventType, self.jsonToRelationConverter.makeFileCitation()))
return
except Exception as e:
# Note whether any error occurred, so that
# the finally clause can act accordingly:
self.errorOccurred = True
# Re-raise same error:
raise
finally:
self.reportProgressIfNeeded()
# If above code generated anything to INSERT into SQL
# table, do that now. If row is None, then nothing needs
# to be inserted (e.g. heartbeats):
if row is not None and len(row) != 0 and not self.errorOccurred:
self.jsonToRelationConverter.pushToTable(self.resultTriplet(row, self.mainTableName))
# Clean out data structures in preparation for next
# call to this method:
self.getReadyForNextRow()
def resultTriplet(self, row, targetTableName, colNamesToSet=None):
'''
Given an array of column names, and an array of column values,
construct the return triplet needed for JSONToRelation instance
to generate its INSERT statements (see JSONToRelation.prepareMySQLRow()).
:param row: array of column values
:type row: [<any>]
:param targetTableName: name of SQL table to which the result is directed. The caller
of processOneJSONObject() will create an INSERT statement
for that table.
:type targetTableName: String
:param colNamesToSet: array of strings listing column names in the order in which
their values appear in the row parameter. If None, we assume
the values are destined for the main event table, whose
schema is 'well known'.
:type colNamesToSet: [String]
:return: table name, string with all comma-separated column names, and values as a 3-tuple
:rtype: (String, String, [<any>])
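Example (a hedged sketch; column names and values are hypothetical)::
    self.resultTriplet(['38ab...', 'seq_goto', 'USA'],
                       'EdxTrackEvent',
                       colNamesToSet=['_id', 'event_type', 'ip_country'])
    # returns ('EdxTrackEvent', '_id,event_type,ip_country', ['38ab...', 'seq_goto', 'USA'])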
'''
if colNamesToSet is None and targetTableName != self.mainTableName:
raise ValueError("If colNamesToSet is None, the target table must be the main table whose name was passed into __init__(); was %s" % targetTableName)
if colNamesToSet is not None:
return (targetTableName, ','.join(colNamesToSet), row)
else:
return (targetTableName, ','.join(self.colNamesByTable[targetTableName]), row)
def pushDBCreations(self):
# Put a header comment at the top of the .sql file-to-be,
# followed by the db creation command:
header = "-- If loading this file from the Linux commandline or the\n" +\
"-- MySQL shell, then first remove the '-- ' chars from the\n" +\
"-- 'ALTER ENABLE KEYS' statements below. Keep those chars \n" +\
"-- in place if loading this .sql file via the manageEdxDb.py script,\n" +\
"-- as you should.\n"
self.jsonToRelationConverter.pushString(header)
createStatement = "CREATE DATABASE IF NOT EXISTS %s;\n" % 'Edx'
self.jsonToRelationConverter.pushString(createStatement)
createStatement = "CREATE DATABASE IF NOT EXISTS %s;\n" % 'EdxPrivate'
self.jsonToRelationConverter.pushString(createStatement)
def pushTableCreations(self, replaceTables):
'''
Pushes SQL statements to caller that create all tables, main and
auxiliary. After these CREATE statements, 'START TRANSACTION;\\n' is
pushed. The caller is responsible for pushing 'COMMIT;\\n' when all
subsequent INSERT statements have been pushed.
:param replaceTables: if True, then generated CREATE statements will first DROP the various tables.
If False, the CREATE statements will be IF NOT EXISTS
:type replaceTables: Boolean
'''
self.jsonToRelationConverter.pushString('USE %s;\n' % self.dbName)
self.jsonToRelationConverter.pushString(self.dumpPreamble)
if replaceTables:
# Need to suppress foreign key checks, so that we
# can DROP the tables; this includes the main Account tbl in db EdxPrivate
# and any left-over tmp Account tbl in Edx:
self.jsonToRelationConverter.pushString('DROP TABLE IF EXISTS %s, Answer, InputState, CorrectMap, State, Account, EdxPrivate.Account, LoadInfo, ABExperiment, OpenAssessment;\n' % self.mainTableName)
# Initialize col row arrays for each table. These
# are used in GenericJSONParser.setValInRow(), where
# the column names are added as values for them are
# set:
self.colNamesByTable[self.mainTableName] = []
self.colNamesByTable['Answer'] = []
self.colNamesByTable['CorrectMap'] = []
self.colNamesByTable['InputState'] = []
self.colNamesByTable['State'] = []
self.colNamesByTable['Account'] = []
self.colNamesByTable['LoadInfo'] = []
# Load acceleration for table creation:
self.jsonToRelationConverter.pushString(self.dumpTableCreationPreamble)
self.createAnswerTable()
self.createCorrectMapTable()
self.createInputStateTable()
self.createStateTable()
self.createAccountTable()
self.createEventIpTable()
self.createABExperimentTable()
self.createOpenAssessmentTable()
self.createLoadInfoTable()
self.createMainTable()
# Several switches to speed up the bulk load:
self.jsonToRelationConverter.pushString(self.dumpInsertPreamble)
def genOneCreateStatement(self, tableName, schemaDict, primaryKeyName=None, foreignKeyColNames=None, autoincrement=False):
'''
Given a table name and its ordered schema dict, generate
a basic SQL CREATE TABLE statement. Primary and foreign key names may
optionally be provided. An example of the most complex create statement generated
by this method is::
CREATE TABLE myTable (
col1 VARCHAR(40) NOT NULL Primary Key,
col2 TEXT,
col3 INT,
col4 VARCHAR(32),
FOREIGN KEY(col3) REFERENCES OtherTable(int_col_name_there),
FOREIGN KEY(col4) REFERENCES YetOtherTable(varchar_col_name_there)
);
Example for the optional foreign key specification parameter
that would create the above example::
{'OtherTable' : ('col3', 'int_col_name_there'),
'YetOtherTable' : ('col4', 'varchar_col_name_there')
}
:param tableName: name of table to be created
:type tableName: String
:param schemaDict: dict mapping column names to ColumnSpec objects
:type schemaDict: Dict<String,ColumnSpec>
:param primaryKeyName: name of the primary key column, if any
:type primaryKeyName: String
:param foreignKeyColNames: dict mapping foreign table names to tuples (localColName, foreignColName)
:type foreignKeyColNames: Dict<String,(String,String)>
:param autoincrement: whether this table's primary key is autoincrement
:type autoincrement: Boolean
'''
createStatement = "CREATE TABLE IF NOT EXISTS %s (\n" % tableName
for colname in schemaDict.keys():
if colname == primaryKeyName:
if autoincrement:
createStatement += "%s NOT NULL PRIMARY KEY AUTO_INCREMENT,\n" % schemaDict[colname].getSQLDefSnippet()
else:
createStatement += "%s NOT NULL PRIMARY KEY,\n" % schemaDict[colname].getSQLDefSnippet()
else:
createStatement += "%s NOT NULL,\n" % schemaDict[colname].getSQLDefSnippet()
if foreignKeyColNames is not None:
for foreignTableName in foreignKeyColNames.keys():
(localFldName, foreignKeyName) = foreignKeyColNames[foreignTableName]
createStatement += " FOREIGN KEY(%s) REFERENCES %s(%s) ON DELETE CASCADE,\n" % (localFldName, foreignTableName, foreignKeyName)
# Cut away the comma and newline after the last column spec,
# and add newline, closing paren, and semicolon:
createStatement = createStatement[0:-2] + '\n ) ENGINE=InnoDB'
if tableName == 'EdxTrackEvent':
createStatement += '\nPARTITION BY LIST COLUMNS(quarter) ( \n' +\
"PARTITION pAY2012_Winter VALUES IN ('winter2013'),\n" +\
"PARTITION pAY2012_Spring VALUES IN ('spring2013'),\n" +\
"PARTITION pAY2012_Summer VALUES IN ('summer2013'),\n" +\
"PARTITION pAY2013_Fall VALUES IN ('fall2013'),\n" +\
"PARTITION pAY2013_Winter VALUES IN ('winter2014'),\n" +\
"PARTITION pAY2013_Spring VALUES IN ('spring2014'),\n" +\
"PARTITION pAY2013_Summer VALUES IN ('summer2014'),\n" +\
"PARTITION pAY2014_Fall VALUES IN ('fall2014'),\n" +\
"PARTITION pAY2014_Winter VALUES IN ('winter2015'),\n" +\
"PARTITION pAY2014_Spring VALUES IN ('spring2015'),\n" +\
"PARTITION pAY2014_Summer VALUES IN ('summer2015'),\n" +\
"PARTITION pAY2015_Fall VALUES IN ('fall2015'),\n" +\
"PARTITION pAY2015_Winter VALUES IN ('winter2016'),\n" +\
"PARTITION pAY2015_Spring VALUES IN ('spring2016'),\n" +\
"PARTITION pAY2015_Summer VALUES IN ('summer2016'),\n" +\
"PARTITION pAY2016_Fall VALUES IN ('fall2016'),\n" +\
"PARTITION pAY2016_Winter VALUES IN ('winter2017'),\n" +\
"PARTITION pAY2016_Spring VALUES IN ('spring2017'),\n" +\
"PARTITION pAY2016_Summer VALUES IN ('summer2017'),\n" +\
"PARTITION pAY2017_Fall VALUES IN ('fall2017'),\n" +\
"PARTITION pAY2017_Winter VALUES IN ('winter2018'),\n" +\
"PARTITION pAY2017_Spring VALUES IN ('spring2018'),\n" +\
"PARTITION pAY2017_Summer VALUES IN ('summer2018')" +\
");\n"
else:
createStatement += ';\n'
return createStatement
def createAnswerTable(self):
createStatement = self.genOneCreateStatement('Answer', self.schemaAnswerTbl, primaryKeyName='answer_id')
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('Answer', self.schemaAnswerTbl)
def createCorrectMapTable(self):
createStatement = self.genOneCreateStatement('CorrectMap',
self.schemaCorrectMapTbl,
primaryKeyName='correct_map_id')
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('CorrectMap', self.schemaCorrectMapTbl)
def createInputStateTable(self):
createStatement = self.genOneCreateStatement('InputState',
self.schemaInputStateTbl,
primaryKeyName='input_state_id')
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('InputState', self.schemaInputStateTbl)
def createStateTable(self):
# Make the foreign keys information dict ordered. Doesn't
# matter to SQL engine, but makes unittesting easier, b/c
# order of foreign key declarations will be constant on
# each run:
foreignKeysDict = OrderedDict()
foreignKeysDict['Answer'] = ('student_answer', 'answer_id')
foreignKeysDict['CorrectMap'] = ('correct_map', 'correct_map_id')
foreignKeysDict['InputState'] = ('input_state', 'input_state_id')
createStatement = self.genOneCreateStatement('State',
self.schemaStateTbl,
primaryKeyName='state_id',
foreignKeyColNames=foreignKeysDict)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('State', self.schemaStateTbl)
def createAccountTable(self):
# Create a tmp Account tbl in the Edx db for the load
# process:
createStatement = self.genOneCreateStatement('Account',
self.schemaAccountTbl,
primaryKeyName='account_id'
)
self.jsonToRelationConverter.pushString(createStatement)
# And one in db EdxPrivate, if it doesn't exist:
createStatement = self.genOneCreateStatement('EdxPrivate.Account',
self.schemaAccountTbl,
primaryKeyName='account_id'
)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('Account', self.schemaAccountTbl)
def createEventIpTable(self):
# Create a tmp EventIp tbl in the Edx db for the load
# process:
createStatement = self.genOneCreateStatement('EventIp',
self.schemaEventIpTbl,
primaryKeyName='event_table_id'
)
self.jsonToRelationConverter.pushString(createStatement)
createStatement = self.genOneCreateStatement('EdxPrivate.EventIp',
self.schemaEventIpTbl,
primaryKeyName='event_table_id'
)
# And one in db EdxPrivate, if it doesn't exist:
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('EventIp', self.schemaEventIpTbl)
def createABExperimentTable(self):
createStatement = self.genOneCreateStatement('ABExperiment',
self.schemaABExperimentTbl,
primaryKeyName='event_table_id'
)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('ABExperiment', self.schemaABExperimentTbl)
def createOpenAssessmentTable(self):
createStatement = self.genOneCreateStatement('OpenAssessment',
self.schemaOpenAssessmentTbl,
primaryKeyName='event_table_id'
)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('OpenAssessment', self.schemaOpenAssessmentTbl)
def createLoadInfoTable(self):
createStatement = self.genOneCreateStatement('LoadInfo',
self.schemaLoadInfoTbl,
primaryKeyName='load_info_id'
)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable('LoadInfo', self.schemaLoadInfoTbl)
def createMainTable(self):
createStatement = self.genOneCreateStatement(self.mainTableName,
self.schemaHintsMainTable,
primaryKeyName='_id',
autoincrement=False)
# Used to be a good boy and declare the foreign keys
# as such to MySQL. But doing this then forces correct
# table delete sequences as MySQL insists on maintaining
# pointer integrity. So the decls are commented out:
# Make the foreign keys information dict ordered. Doesn't
# matter to SQL engine, but makes unittesting easier, b/c
# order of foreign key declarations will be constant on
# each run:
# foreignKeysDict = OrderedDict()
# foreignKeysDict['CorrectMap'] = ('correctMap_fk', 'correct_map_id')
# foreignKeysDict['Answer'] = ('answer_fk', 'answer_id')
# foreignKeysDict['State'] = ('state_fk', 'state_id')
# foreignKeysDict['LoadInfo'] = ('load_info_fk', 'load_info_id')
# createStatement = self.genOneCreateStatement(self.mainTableName,
# self.schemaHintsMainTable,
# primaryKeyName='_id',
# foreignKeyColNames=foreignKeysDict,
# autoincrement=False)
self.jsonToRelationConverter.pushString(createStatement)
# Tell the output module (output_disposition.OutputFile) that
# it needs to know about a new table. That module will create
# a CSV file and CSV writer to which rows destined for this
# table will be written:
self.jsonToRelationConverter.startNewTable(self.mainTableName, self.schemaHintsMainTable)
def handleCommonFields(self, record, row):
self.currCourseDisplayName = None
# Create a unique tuple key and event key for this event:
event_tuple_id = self.getUniqueID()
self.setValInRow(row, '_id', event_tuple_id)
self.setValInRow(row, 'event_id', self.getUniqueID())
self.finishedRow = False
for fldName in self.commonFldNames:
# Default non-existing flds to null:
val = record.get(fldName, None)
# Ensure there are no embedded single quotes or CR/LFs;
# takes care of name = O'Brian
if isinstance(val, basestring):
val = self.makeInsertSafe(val)
# if the event_type starts with a '/', followed by a
# class ID and '/about', treat separately:
if fldName == 'event_type' and val is not None:
if len(val) >= 6 and val[0] == '/' and val[-6:] == '/about':
self.setValInRow(row, 'course_id', val[0:-6])
val = 'about'
self.finishedRow = True
elif val.find('/password_reset_confirm') == 0:
val = 'password_reset_confirm'
self.finishedRow = True
elif val == '/networking/':
val = 'networking'
self.finishedRow = True
elif fldName == 'course_id':
(fullCourseName, course_id, displayName) = self.get_course_id(record) # @UnusedVariable
val = course_id
# Make course_id available for places where rows are added to the Answer table.
# We stick the course_id there for convenience.
self.currCourseID = course_id
self.currCourseDisplayName = displayName
elif fldName == 'course_display_name':
if self.currCourseDisplayName is not None:
val = self.currCourseDisplayName
else:
(fullCourseName, course_id, displayName) = self.get_course_id(record) # @UnusedVariable
val = displayName
elif fldName == 'time':
# Compute academic quarter from time and
# put into row:
quarter = self.getQuarter(self.getEventTimeFromLogTimeString(val))
self.setValInRow(row, 'quarter', quarter)
elif fldName == 'username':
# Hash the name, and store in MySQL col 'anon_screen_name':
if val is not None:
val = self.hashGeneral(val)
fldName = 'anon_screen_name'
elif fldName == 'ip':
ip = val
# For some server originating events
# the IP address is empty. Tolerate
# that:
if len(ip) == 0:
val = 'zzz'
else:
# Col value is to be the three-letter country code,
# looked up from the event's IP address:
val = self.getThreeLetterCountryCode(ip)
fldName = 'ip_country'
# The event row id and IP address go into
# a separate, temp table, to be transferred
# to EdxPrivate later:
eventIpDict = OrderedDict()
eventIpDict['event_table_id'] = event_tuple_id
eventIpDict['event_ip'] = ip
self.pushEventIpInfo(eventIpDict)
elif fldName == 'context':
# Handle here all fields of the context dict
# that are common. Then set self.currContext
# to the context value, i.e. the dict inside.
# These are course_id, org_id, and user_id.
# We leave out the user_id, b/c we don't want
# it in the tables: they have anon_screen_name
# instead. With self.currContextDict, all field
# handlers can grab what they need in their context:
self.currContextDict = self.ensureDict(val)
if self.currContextDict is not None:
theCourseId = self.currContextDict.get('course_id', None)
self.setValInRow(row, 'course_display_name', theCourseId)
# Sometimes context includes a 'module' field with a
# dict inside:
# "module": {"display_name": "Peer Assessment"}}
moduleDisplayName = self.findModuleNameInEventContext(record)
if len(moduleDisplayName) > 0:
self.setValInRow(row, 'resource_display_name', moduleDisplayName)
# Fill in the organization:
theOrg = self.currContextDict.get('org_id', None)
self.setValInRow(row, 'organization', theOrg)
# The following is no longer done. The only info that
# goes into the ABTest table now is assignments or
# reassignments, not every event about a learner that
# happens to be in an AB group.
# When a participant who is assigned to an AB experiment
# triggers an event, the context field course_user_tags
# contains a dict with the learner's partition and group
# assignment. Ensure that info about this event is recorded
# in the ABExperiment table:
##abTestInfo = self.currContextDict.get('course_user_tags', None)
##if abTestInfo is not None:
## eventType = record.get('event_type', None)
## self.addEventToABExperiment(event_tuple_id, eventType, self.currContextDict)
# Make course_id available for places where rows are added to the Answer table.
# We stick the course_id there for convenience.
self.currCourseID = theCourseId
self.currCourseDisplayName = theCourseId
# We took care of all fields in the context element, so go on to next common field:
continue
self.setValInRow(row, fldName, val)
# Add the foreign key that points to the current row in the load info table:
self.setValInRow(row, 'load_info_fk', self.currLoadInfoFK)
return row
def handleSeqNav(self, record, row, event, eventType):
'''
Sequence navigation. Events look like this::
{"username": "BetaTester1",
"host": "class.stanford.edu",
"session": "009e5b5e1bd4ab5a800cafc48bad9e44",
"event_source": "browser", "
event_type": "seq_goto",
"time": "2013-06-08T23:29:58.346222",
"ip": "24.5.14.103",
"event": "{\"old\":2,\"new\":1,\"id\":\"i4x://Medicine/HRP258/sequential/53b0357680d24191a60156e74e184be3\"}",
"agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0",
"page": "https://class.stanford.edu/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/ac6d006c4bc84fc1a9cec412734fd5ca/53b0357680d24191a60156e74e184be3/"
}
:param record: the full tracking log record as a dict
:type record: Dict
:param row: partially filled array of column values. Passed by reference
:type row: List<<any>>
:param event: the (already decoded) value of the record's 'event' field
:type event: Dict
:param eventType: value of the record's event_type field
:type eventType: String
'''
if event is None:
self.logWarn("Track log line %s: missing event text in sequence navigation event." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in sequence navigation event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
oldIndex = event.get('old', 0)
newIndex = event.get('new', 0)
try:
seqID = event['id']
except KeyError:
self.logWarn("Track log line %s with event type %s is missing sequence id" %
(self.jsonToRelationConverter.makeFileCitation(), eventType))
return row
self.setValInRow(row, 'sequence_id', seqID)
self.setValInRow(row, 'goto_from', oldIndex)
self.setValInRow(row, 'goto_dest', newIndex)
# Try to find a display name for this sequence id:
self.setResourceDisplayName(row, seqID)
return row
def handleProblemCheck(self, record, row, event):
'''
The problem_check event comes in two flavors (ascertained by observation):
The most complex is this one::
{
"success": "correct",
"correct_map": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {
"hint": "",
"mode": null,
"correctness": "correct",
"msg": "",
"npoints": null,
"queuestate": null
},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {
"hint": "",
"hintmode": null,
"correctness": "correct",
"msg": "",
"npoints": null,
"queuestate": null
}
},
"attempts": 2,
"answers": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": "choice_0",
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": "choice_3"
},
"state": {
"student_answers": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": "choice_3",
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": "choice_1"
},
"seed": 1,
"done": true,
"correct_map": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {
"hint": "",
"hintmode": null,
"correctness": "incorrect",
"msg": "",
"npoints": null,
"queuestate": null
},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {
"hint": "",
"hintmode": null,
"correctness": "incorrect",
"msg": "",
"npoints": null,
"queuestate": null
}
},
"input_state": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {}
}
},
"problem_id": "i4x://Medicine/HRP258/problem/e194bcb477104d849691d8b336b65ff6"
}
The simpler version is like this, in which the answers are styled as HTTP GET parameters::
{"username": "smitch",
"host": "class.stanford.edu",
"session": "75a8c9042ba10156301728f61e487414",
"event_source": "browser",
"event_type": "problem_check",
"time": "2013-08-04T06:27:13.660689+00:00",
"ip": "66.172.116.216",
"event": "\"input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_2_1=choice_2&
input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_3_1=choice_2\"",
"agent": "Mozilla/5.0 (Windows NT 6.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36",
"page": "https://class.stanford.edu/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/de472d1448a74e639a41fa584c49b91e/ed52812e4f96445383bfc556d15cb902/"
}
We handle the complex version here, but call handleProblemCheckSimpleCase()
for the simple case.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in problem_check event." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
if isinstance(event, basestring):
# Simple case:
return self.handleProblemCheckSimpleCase(row, event)
# Complex case: event field should be a dict:
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in problem_check event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Go through all the top-level problem_check event fields first:
self.setValInRow(row, 'success', event.get('success', ''))
self.setValInRow(row, 'attempts', event.get('attempts', -1))
problem_id = event.get('problem_id', '')
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
# correctMap field may consist of many correct maps.
# Create an entry for each in the CorrectMap table,
# collecting the resulting foreign keys:
correctMapsDict = event.get('correct_map', None)
if correctMapsDict is not None:
correctMapFKeys = self.pushCorrectMaps(correctMapsDict)
else:
correctMapFKeys = []
answersDict = event.get('answers', None)
if answersDict is not None:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
stateDict = event.get('state', None)
if stateDict is not None:
stateFKeys = self.pushState(stateDict)
else:
stateFKeys = []
# Now need to generate enough near-replicas of event
# entries to cover all correctMap, answers, and state
# foreign key entries that were created:
generatedAllRows = False
indexToFKeys = 0
# Generate main table rows that refer to all the
# foreign entries we made above to Answer, CorrectMap, and State
# We make as few rows as possible by filling in
# columns in all three foreign key entries, until
# we run out of all references:
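# For instance (hypothetical counts): with 2 correctMap keys, 3 answer
# keys, and 1 state key, the loop below emits 3 main-table rows:
#   row 0: correctMap_fk=cm0, answer_fk=ans0, state_fk=st0
#   row 1: correctMap_fk=cm1, answer_fk=ans1, state_fk=''
#   row 2: correctMap_fk='' , answer_fk=ans2, state_fk=''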
while not generatedAllRows:
try:
correctMapFKey = correctMapFKeys[indexToFKeys]
except IndexError:
correctMapFKey = None
try:
answerFKey = answersFKeys[indexToFKeys]
except IndexError:
answerFKey = None
try:
stateFKey = stateFKeys[indexToFKeys]
except IndexError:
stateFKey = None
# Have we created rows to cover all student_answers, correct_maps, and input_states?
if correctMapFKey is None and answerFKey is None and stateFKey is None:
generatedAllRows = True
continue
# Fill in one main table row.
self.setValInRow(row, 'correctMap_fk', correctMapFKey if correctMapFKey is not None else '')
self.setValInRow(row, 'answer_fk', answerFKey if answerFKey is not None else '')
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey] if answerToProblemMap[answerFKey] is not None else ''
self.setValInRow(row, 'problem_id', problemID)
self.setValInRow(row, 'state_fk', stateFKey if stateFKey is not None else '')
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
indexToFKeys += 1
# Return empty row, b/c we already pushed all necessary rows:
return []
def handleProblemCheckSimpleCase(self, row, event):
'''
Handle the simple case of problem_check type events.
Their event field has this form::
"event": "\"input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_2_1=choice_2&
input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_3_1=choice_2\"",
The problems and proposed solutions are styled like HTTP GET request parameters.
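Splitting on '&' and then on '=' would thus yield, schematically, an answers dict like::
{'input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_2_1': 'choice_2',
'input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_3_1': 'choice_2'}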
:param row:
:type row:
:param event:
:type event:
'''
# Easy case: event field is GET-styled list of problem ID/choices.
# Separate all (&-separated) answers into strings like 'problem10=choice_2':
problemAnswers = event.split('&')
# Build a map problemID-->answer:
answersDict = {}
for problemID_choice in problemAnswers:
try:
# Pull elements out from GET parameter strings like 'problemID=choice_2'
(problemID, answerChoice) = problemID_choice.split('=')
answersDict[problemID] = self.makeInsertSafe(answerChoice)
except ValueError:
# Badly formatted GET parameter element:
self.logWarn("Track log line %s: badly formatted problemID/answerChoice GET parameter pair: '%s'." %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
if len(answersDict) > 0:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
# Now need to generate enough near-replicas of event
# entries to cover all answers, putting one Answer
# table key into the answers foreign key column each
# time:
for answerFKey in answersFKeys:
# Fill in one main table row.
self.setValInRow(row, 'answer_fk', answerFKey, self.mainTableName)
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey]
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
# Return empty row, b/c we already pushed all necessary rows:
return []
def pushCorrectMaps(self, correctMapsDict):
'''
Get dicts like this::
{"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {
"hint": "",
"hintmode": null,
"correctness": "correct",
"msg": "",
"npoints": null,
"queuestate": null
},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {
"hint": "",
"hintmode": null,
"correctness": "correct",
"msg": "",
"npoints": null,
"queuestate": null
}
}
The above has two correctmaps.
:param correctMapsDict: dict of CorrectMap dicts
:type correctMapsDict: Dict<String, Dict<String,String>>
:return: array of unique keys, one key for each CorrectMap row the method has added.
In case of the above example that would be two keys (uuids)
:rtype: [String]
'''
# We'll create uuids for each new CorrectMap row
# we create. We collect these uuids in the following
# array, and return them to the caller. The caller
# will then use them as foreign keys in the Event
# table:
correctMapUniqKeys = []
for answerKey in correctMapsDict.keys():
answer_id = answerKey
oneCorrectMapDict = correctMapsDict[answerKey]
hint = oneCorrectMapDict.get('hint', '')
if hint is None:
hint = ''
mode = oneCorrectMapDict.get('hintmode', '')
if mode is None:
mode = ''
correctness = oneCorrectMapDict.get('correctness', '')
if correctness is None:
correctness = ''
msg = oneCorrectMapDict.get('msg', '')
if msg is None:
msg = ''
else:
msg = self.makeInsertSafe(msg)
npoints = oneCorrectMapDict.get('npoints', -1)
if npoints is None:
npoints = -1
# queuestate:
# Dict {key:'', time:''} where key is a secret string, and time is a string dump
# of a DateTime object in the format '%Y%m%d%H%M%S'. Is None when not queued
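# When both fields are present, they are flattened below into a single
# 'key:time' string before being stored in the CorrectMap row.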
queuestate = oneCorrectMapDict.get('queuestate', '')
if queuestate is None:
queuestate = ''
if len(queuestate) > 0:
queuestate_key = queuestate.get('key', '')
queuestate_time = queuestate.get('time', '')
queuestate = queuestate_key + ":" + queuestate_time
# Unique key for the CorrectMap entry (and foreign
# key for the Event table):
correct_map_id = self.getUniqueID()
correctMapUniqKeys.append(correct_map_id)
correctMapValues = [correct_map_id,
answer_id,
correctness,
npoints,
msg,
hint,
mode,
queuestate]
self.jsonToRelationConverter.pushToTable(self.resultTriplet(correctMapValues, 'CorrectMap', self.schemaCorrectMapTbl.keys()))
# Return the array of CorrectMap row unique ids we just
# created and pushed:
return correctMapUniqKeys
def pushAnswers(self, answersDict):
'''
Gets structure like this::
"answers": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": "choice_0",
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": "choice_3"
}
:param answersDict:
:type answersDict:
:return: array of keys created for answers in answersDict, and a dict mapping each key to the
corresponding problem ID
:rtype: ([String], Dict<String,String>
'''
answersKeys = []
answerToProblemMap = {}
for problemID in answersDict.keys():
answer = answersDict.get(problemID, None)
# answer could be an array of Unicode strings, or
# a single string: u'choice_1', or [u'choice_1'] or [u'choice_1', u'choice_2']
# below: turn into latin1, comma separated single string.
# Else Python prints the "u'" into the INSERT statement
# and makes MySQL unhappy:
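# e.g. [u'choice_1', u'choice_2'] is joined into 'choice_1,choice_2'
# before being passed through makeInsertSafe().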
if answer is not None:
if isinstance(answer, list):
answer = self.makeInsertSafe(','.join(answer))
else:
answer = self.makeInsertSafe(answer)
answersKey = self.getUniqueID()
answerToProblemMap[answersKey] = problemID
answersKeys.append(answersKey)
answerValues = [answersKey, # answer_id fld
problemID, # problem_id fld
answer,
self.currCourseID
]
self.jsonToRelationConverter.pushToTable(self.resultTriplet(answerValues, 'Answer', self.schemaAnswerTbl.keys()))
return (answersKeys, answerToProblemMap)
def pushState(self, stateDict):
'''
We get a structure like this::
{
"student_answers": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": "choice_3",
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": "choice_1"
},
"seed": 1,
"done": true,
"correct_map": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {
"hint": "",
"hintmode": null,
"correctness": "incorrect",
"msg": "",
"npoints": null,
"queuestate": null
},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {
"hint": "",
"hintmode": null,
"correctness": "incorrect",
"msg": "",
"npoints": null,
"queuestate": null
}
},
"input_state": {
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {}
}
}
:param stateDict:
:type stateDict:
:return: array of keys into State table that were created in this method
:rtype: [String]
'''
stateFKeys = []
studentAnswersDict = stateDict.get('student_answers', None)
if studentAnswersDict is not None:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(studentAnswersFKeys, answerToProblemMap) = self.pushAnswers(studentAnswersDict) # @UnusedVariable
else:
studentAnswersFKeys = []
seed = stateDict.get('seed', '')
done = stateDict.get('done', '')
# Can't use int for SQL, b/c Python writes as 'True'
done = str(done)
problemID = stateDict.get('problem_id', '')
correctMapsDict = stateDict.get('correct_map', None)
if correctMapsDict is not None:
correctMapFKeys = self.pushCorrectMaps(correctMapsDict)
else:
correctMapFKeys = []
inputStatesDict = stateDict.get('input_state', None)
if inputStatesDict is not None:
inputStatesFKeys = self.pushInputStates(inputStatesDict)
else:
inputStatesFKeys = []
# Now generate enough State rows to reference all student_answers,
# correctMap, and input_state entries. That is, flatten the JSON
# structure across relations State, Answer, CorrectMap, and InputState:
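# The same zip-and-pad scheme as in handleProblemCheck() applies: we
# emit as many State rows as the longest of the three foreign-key
# lists requires, padding missing entries with ''.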
generatedAllRows = False
indexToFKeys = 0
while not generatedAllRows:
try:
studentAnswerFKey = studentAnswersFKeys[indexToFKeys]
except IndexError:
studentAnswerFKey = None
try:
correctMapFKey = correctMapFKeys[indexToFKeys]
except IndexError:
correctMapFKey = None
try:
inputStateFKey = inputStatesFKeys[indexToFKeys]
except IndexError:
inputStateFKey = None
# Have we created rows to cover all student_answers, correct_maps, and input_states?
if studentAnswerFKey is None and correctMapFKey is None and inputStateFKey is None:
generatedAllRows = True
continue
studentAnswerFKey = studentAnswerFKey if studentAnswerFKey is not None else ''
correctMapFKey = correctMapFKey if correctMapFKey is not None else ''
inputStateFKey = inputStateFKey if inputStateFKey is not None else ''
# Unique ID that ties all these related rows together:
state_id = self.getUniqueID()
stateFKeys.append(state_id)
stateValues = [state_id, seed, done, problemID, studentAnswerFKey, correctMapFKey, inputStateFKey]
rowInfoTriplet = self.resultTriplet(stateValues, 'State', self.schemaStateTbl.keys())
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
indexToFKeys += 1
return stateFKeys
def pushInputStates(self, inputStatesDict):
'''
Gets structure like this::
{
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_3_1": {},
"i4x-Medicine-HRP258-problem-e194bcb477104d849691d8b336b65ff6_2_1": {}
}
:param inputStatesDict:
:type inputStatesDict:
:return: array of keys created for input state problems.
:rtype: [String]
'''
inputStateKeys = []
for problemID in inputStatesDict.keys():
inputStateProbVal = inputStatesDict.get(problemID, None)
if inputStateProbVal is not None:
# If prob value is an empty dict (as in example above),
# then make it an empty str, else the value will show up as
# {} in the VALUES part of the INSERT statements, and
# MySQL will get cranky:
try:
if len(inputStateProbVal) == 0:
inputStateProbVal = ''
except:
pass
inputStateKey = self.getUniqueID()
inputStateKeys.append(inputStateKey)
inputStateValues = [inputStateKey,
problemID,
inputStateProbVal
]
self.jsonToRelationConverter.pushToTable(self.resultTriplet(inputStateValues, 'InputState', self.schemaInputStateTbl.keys()))
return inputStateKeys
def pushEventIpInfo(self, eventIpDict):
'''
Takes an ordered dict with two fields:
the _id field of the current main table event
under key event_table_id, and an IP address.
:param eventCountryDict: dict with main table _id, and 3-char country code
:type eventCountryDict: {String : String}
'''
self.jsonToRelationConverter.pushToTable(self.resultTriplet(eventIpDict.values(),
'EventIp',
self.schemaEventIpTbl.keys()))
return
def pushABExperimentInfo(self, abExperimentDict):
'''
Takes an ordered dict with fields:
- 'event_table_id' : EdxTrackEvent _id field
- 'event_type : type of event that caused need for this row
- 'anon_screen_name': ...
- 'group_id' : experimental group's ID
- 'group_name' : experimental group's name
- 'partition_id' : experimental partition's id
- 'partition_name' : experimental partition's name
- 'child_module_id': id of module within partition that was served to a participant
- 'resource_display_name' : human readable name of module
- 'cohort_id' : numeric ID of cohort group
- 'cohort_name' : string name of cohort group
- 'course_display_name' : course name
:param abExperimentDict: Ordered dict with all required ABExperiment table column values
:type abExperimentDict: {STRING : STRING, STRING : INT, STRING : STRING, STRING : INT, STRING : STRING, STRING : STRING}
'''
self.jsonToRelationConverter.pushToTable(self.resultTriplet(abExperimentDict.values(), 'ABExperiment', self.schemaABExperimentTbl.keys()))
return
def pushOpenAssessmentInfo(self, openAssessmentDict):
'''
Takes an ordered dict with fields:
event_table_id
event_type
anon_screen_name
score_type VARCHAR(2)
submission_uuid
edx_anon_id
time DATETIME
aux_time DATETIME
course_display_name
resource_display_name
resource_id
submission_text TEXT
feedback_text TEXT
comment_text TEXT
attempt_num
options
corrections
points
:param openAssessmentDict: Ordered dict with all required OpenAssessment table column values
:type openAssessmentDict: Dict
'''
self.jsonToRelationConverter.pushToTable(self.resultTriplet(openAssessmentDict.values(), 'OpenAssessment', self.schemaOpenAssessmentTbl.keys()))
return
def pushAccountInfo(self, accountDict):
'''
Takes an ordered dict with the fields of
a create_account event. Pushes the values
(name, address, email, etc.) as a row to the
Account table, and returns the resulting row
primary key for inclusion in the main table's
accountFKey field.
:param accountDict:
:type accountDict:
'''
accountDict['account_id'] = self.getUniqueID()
self.jsonToRelationConverter.pushToTable(self.resultTriplet(accountDict.values(), 'Account', self.schemaAccountTbl.keys()))
return
def pushLoadInfo(self, loadDict):
#loadDict['load_info_id'] = self.getUniqueID()
# Make the primary-key row ID from the load file
# basename, so that it is reproducible:
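# i.e. re-running the conversion over the same load file yields the
# same LoadInfo primary key, rather than a fresh uuid each time.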
loadDict['load_info_id'] = self.hashGeneral(loadDict['load_file'])
self.jsonToRelationConverter.pushToTable(self.resultTriplet(loadDict.values(), 'LoadInfo', self.schemaLoadInfoTbl.keys()))
return loadDict['load_info_id']
def handleProblemReset(self, record, row, event):
'''
Gets an event string like this::
"{\"POST\": {\"id\": [\"i4x://Engineering/EE222/problem/e68cfc1abc494dfba585115792a7a750@draft\"]}, \"GET\": {}}"
After turning this JSON into Python::
{u'POST': {u'id': [u'i4x://Engineering/EE222/problem/e68cfc1abc494dfba585115792a7a750@draft']}, u'GET': {}}
Or the event could be simpler, like this::
u'input_i4x-Engineering-QMSE01-problem-dce5fe9e04be4bc1932efb05a2d6db68_2_1=2'
In the latter case we just put that string into the problemID field
of the main table
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
# #*********************
# self.logWarn("*******record: %s (%s)(%s) " %\
# (record, self.jsonToRelationConverter.makeFileCitation(), str(event)))
# self.logWarn("*******row: %s (%s)(%s) " %\
# (row, self.jsonToRelationConverter.makeFileCitation(), str(event)))
# self.logWarn("*******event: %s (%s)(%s) " %\
# (event, self.jsonToRelationConverter.makeFileCitation(), str(event)))
# #*********************
if event is None:
self.logWarn("Track log line %s: missing event text in event type problem_reset." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
# From "{\"POST\": {\"id\": [\"i4x://Engineering/EE368/problem/ab656f3cb49e4c48a6122dc724267cb6@draft\"]}, \"GET\": {}}"
# make a dict:
postGetDict = self.ensureDict(event)
if postGetDict is None:
if isinstance(event, basestring):
self.setValInRow(row, 'problem_id', event)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, event)
return row
else:
self.logWarn("Track log line %s: event is not a dict in problem_reset event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Get the POST field's problem id array:
try:
problemIDs = postGetDict['POST']['id']
except KeyError:
self.logWarn("Track log line %s with event type problem_reset contains event without problem ID array: '%s'" %
(self.jsonToRelationConverter.makeFileCitation(), event))
return row
self.setValInRow(row, 'problem_id', problemIDs)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemIDs)
return row
def handleProblemShow(self, record, row, event):
'''
Gets an event string like this::
"{\"problem\":\"i4x://Medicine/HRP258/problem/c5cf8f02282544729aadd1f9c7ccbc87\"}"
After turning this JSON into Python::
{u'problem': u'i4x://Medicine/HRP258/problem/c5cf8f02282544729aadd1f9c7ccbc87'}
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event type problem_show." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
# From "{\"problem\":\"i4x://Medicine/HRP258/problem/c5cf8f02282544729aadd1f9c7ccbc87\"}"
# make a dict:
postGetDict = self.ensureDict(event)
if postGetDict is None:
self.logWarn("Track log line %s: event is not a dict in problem_show event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Get the problem id:
try:
problemID = postGetDict['problem']
except KeyError:
self.logWarn("Track log line %s with event type problem_show contains event without problem ID: '%s'" %
(self.jsonToRelationConverter.makeFileCitation(), event))
return row
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
return row
def handleProblemSave(self, record, row, event):
'''
Gets an event string like this::
"\"input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_2_1=13.4&
input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_3_1=2.49&
input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_4_1=13.5&
input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_5_1=3\""
After splitting this string on '&', and then each result on '=', we add the
problemID/solution pairs to the Answer table:
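Schematically, the resulting answers dict for the example above would be::
{'input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_2_1': '13.4',
'input_i4x-Medicine-HRP258-problem-44c1ef4e92f648b08adbdcd61d64d558_3_1': '2.49',
...}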
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event type problem_save." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
if not isinstance(event, basestring):
self.logWarn("Track log line %s: event is not a string in problem save event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
probIDSolPairs = event.split('&')
answersDict = {}
for probIDSolPair in probIDSolPairs:
(problemID, choice) = probIDSolPair.split('=')
answersDict[problemID] = choice
# Add answer/solutions to Answer table.
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
if len(answersDict) > 0:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
# Now need to generate enough near-replicas of event
# entries to cover all answer
# foreign key entries that were created:
for answerFKey in answersFKeys:
# Fill in one main table row.
self.setValInRow(row, 'answer_fk', answerFKey)
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey]
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
# Return empty row, b/c we already pushed all necessary rows:
return []
def handleQuestionProblemHidingShowing(self, record, row, event):
'''
Gets an event string like this::
"{\"location\":\"i4x://Education/EDUC115N/combinedopenended/c8af7daea1f54436b0b25930b1631845\"}"
After importing from JSON into Python::
{u'location': u'i4x://Education/EDUC115N/combinedopenended/c8af7daea1f54436b0b25930b1631845'}
'''
if event is None:
self.logWarn("Track log line %s: missing event text in question hide or show." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
"{\"location\":\"i4x://Education/EDUC115N/combinedopenended/4abb8b47b03d4e3b8c8189b3487f4e8d\"}"
# make a dict:
locationDict = self.ensureDict(event)
if locationDict is None:
self.logWarn("Track log line %s: event is not a dict in problem_show/hide event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Get location:
try:
location = locationDict['location']
except KeyError:
self.logWarn("Track log line %s: no location field provided in problem hide or show event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
self.setValInRow(row, 'question_location', location)
return row
def handleRubricSelect(self, record, row, event):
'''
Gets an event string like this::
"{\"location\":\"i4x://Education/EDUC115N/combinedopenended/4abb8b47b03d4e3b8c8189b3487f4e8d\",\"selection\":\"1\",\"category\":0}"
{u'category': 0, u'selection': u'1', u'location': u'i4x://Education/EDUC115N/combinedopenended/4abb8b47b03d4e3b8c8189b3487f4e8d'}
'''
if event is None:
self.logWarn("Track log line %s: missing event text in select_rubric." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
# From "{\"location\":\"i4x://Education/EDUC115N/combinedopenended/4abb8b47b03d4e3b8c8189b3487f4e8d\",\"selection\":\"1\",\"category\":0}"
# make a dict:
locationDict = self.ensureDict(event)
if locationDict is None:
self.logWarn("Track log line %s: event is not a dict in select_rubric event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
location = locationDict['location']
selection = locationDict['selection']
category = locationDict['category']
except KeyError:
self.logWarn("Track log line %s: missing location, selection, or category in event type select_rubric." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
self.setValInRow(row, 'question_location', location)
self.setValInRow(row, 'rubric_selection', selection)
self.setValInRow(row, 'rubric_category', category)
return row
def handleOEShowFeedback(self, record, row, event):
'''
All examples seen as of this writing had this field empty: "{}"
'''
# Just stringify the dict and make it the field content:
self.setValInRow(row, 'feedback', str(event))
return row
def handleOEFeedbackResponseSelected(self, record, row, event):
'''
Gets an event string like this::
"event": "{\"value\":\"5\"}"
After JSON import into Python::
{u'value': u'5'}
'''
if event is None:
self.logWarn("Track log line %s: missing event text in oe_feedback_response_selected." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
# From "{\"value\":\"5\"}"
# make a dict:
valDict = self.ensureDict(event)
if valDict is None:
self.logWarn("Track log line %s: event is not a dict in oe_feedback_response_selected event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
value = valDict['value']
except KeyError:
self.logWarn("Track log line %s: missing 'value' field in event type oe_feedback_response_selected." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
self.setValInRow(row, 'feedback_response_selected', value)
return row
def handleVideoPlayPause(self, record, row, event):
'''
For play_video, event looks like this::
"{\"id\":\"i4x-Education-EDUC115N-videoalpha-c41e588863ff47bf803f14dec527be70\",\"code\":\"html5\",\"currentTime\":0}"
For pause_video::
"{\"id\":\"i4x-Education-EDUC115N-videoalpha-c5f2fd6ee9784df0a26984977658ad1d\",\"code\":\"html5\",\"currentTime\":124.017784}"
For stop_video::
"{\"id\":\"i4x-Engineering-CS101-video-z57\",\"currentTime\":632.729,\"code\":\"mQNhppYqfBk\"}"
For load_video::
"{\"id\":\"i4x-Education-EDUC115N-videoalpha-003bc44b4fd64cb79cdfd459e93a8275\",\"code\":\"4GlF1t_5EwI\"}"
'''
if event is None:
self.logWarn("Track log line %s: missing event text in video play or pause." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in video play/pause: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
videoID = valsDict.get('id', None)
self.setResourceDisplayName(row, videoID)
videoCode = valsDict.get('code', None)
videoCurrentTime = str(valsDict.get('currentTime', None))
videoSpeed = str(valsDict.get('speed', None))
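# Note: the str() calls above mean that an absent currentTime or speed
# ends up in the row as the literal string 'None'.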
self.setValInRow(row, 'video_id', str(videoID))
self.setValInRow(row, 'video_code', str(videoCode))
self.setValInRow(row, 'video_current_time', str(videoCurrentTime))
self.setValInRow(row, 'video_speed', str(videoSpeed))
return row
def handleVideoSeek(self, record, row, event):
'''
For play_video, event looks like this::
"{\"id\":\"i4x-Medicine-HRP258-videoalpha-413d6a45b82848339ab5fd3836dfb928\",
\"code\":\"html5\",
\"old_time\":308.506103515625,
\"new_time\":290,
\"type\":\"slide_seek\"}"
'''
if event is None:
self.logWarn("Track log line %s: missing event text in video seek." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in video seek: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
videoID = valsDict.get('id', None)
self.setResourceDisplayName(row, videoID)
videoCode = valsDict.get('code', None)
videoOldTime = str(valsDict.get('old_time', None))
videoNewTime = str(valsDict.get('new_time', None))
videoSeekType = valsDict.get('type', None)
self.setValInRow(row, 'video_id', videoID)
self.setValInRow(row, 'video_code', videoCode)
self.setValInRow(row, 'video_old_time', videoOldTime)
self.setValInRow(row, 'video_new_time', videoNewTime)
self.setValInRow(row, 'video_seek_type', videoSeekType)
return row
def handleVideoSpeedChange(self, record, row, event):
'''
Events look like this::
"{\"id\":\"i4x-Medicine-HRP258-videoalpha-7cd4bf0813904612bcd583a73ade1d54\",
\"code\":\"html5\",
\"currentTime\":1.6694719791412354,
\"old_speed\":\"1.50\",
\"new_speed\":\"2.0\"}"
'''
if event is None:
self.logWarn("Track log line %s: missing event text in video speed change." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in video speed change: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
videoID = valsDict.get('id', None)
self.setResourceDisplayName(row, videoID)
videoCode = valsDict.get('code', None)
videoCurrentTime = str(valsDict.get('currentTime', None))
videoOldSpeed = str(valsDict.get('old_speed', None))
videoNewSpeed = str(valsDict.get('new_speed', None))
self.setValInRow(row, 'video_id', videoID)
self.setValInRow(row, 'video_code', videoCode)
self.setValInRow(row, 'video_current_time', videoCurrentTime)
self.setValInRow(row, 'video_old_speed', videoOldSpeed)
self.setValInRow(row, 'video_new_speed', videoNewSpeed)
return row
def handleFullscreen(self, record, row, event):
'''
Events look like this::
"{\"id\":\"i4x-Medicine-HRP258-videoalpha-4b200d3944cc47e5ae3ad142c1006075\",\"code\":\"html5\",\"currentTime\":348.4132080078125}"
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event type fullscreen." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in fullscreen: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
videoID = valsDict.get('id', None)
self.setResourceDisplayName(row, videoID)
videoCode = valsDict.get('code', None)
videoCurrentTime = str(valsDict.get('currentTime', None))
self.setValInRow(row, 'video_id', videoID)
self.setValInRow(row, 'video_code', videoCode)
self.setValInRow(row, 'video_current_time', videoCurrentTime)
return row
def handleNotFullscreen(self, record, row, event):
'''
Events look like this::
"{\"id\":\"i4x-Medicine-HRP258-videoalpha-c5cbefddbd55429b8a796a6521b9b752\",\"code\":\"html5\",\"currentTime\":661.1010131835938}"
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event type not_fullscreen." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in not_fullscreen: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
videoID = valsDict.get('id', None)
self.setResourceDisplayName(row, videoID)
videoCode = valsDict.get('code', None)
videoCurrentTime = str(valsDict.get('currentTime', None))
self.setValInRow(row, 'video_id', videoID)
self.setValInRow(row, 'video_code', videoCode)
self.setValInRow(row, 'video_current_time', videoCurrentTime)
return row
def handleBook(self, record, row, event):
'''
The textbook event series is a bit complicated.
Check the OpenEdX documentation for the JSON (below
are three examples). Depending on the textbook event
type, target columns are used in different ways:
JSON Column Comment
------------------------------------------------------------------------------------
chapter resource_display_name (pdf file name)
thumbnail_title resource_display_name
chapter_title resource_display_name
query resource_display_name (for event textbook.pdf.search.executed)
page sequence_id (page within pdf file)
page goto_from (for event textbook.pdf.search.executed,
also for textbook.pdf.thumbnails.toggled)
new goto_dest
old goto_from
amount goto_dest (used in textbook.pdf.zoom.menu.changed;
amount is the target zoom level)
name book_interaction_type (i.e. textbook.pdf.page.loaded
for 'gotopage' in 'type'
textbook.pdf.page.navigatedprevious
for 'prevpage' in 'type'
textbook.pdf.page.navigatednext
for 'nextpage' in 'type')
direction book_interaction_type (when direction field is present,
then the name fld is just
a repeat of event_type)
findprevious book_interaction_type (for event textbook.pdf.search.navigatednext)
status success (for event textbook.pdf.search.executed)
Example JSON events:
{
"username": "...",
"event_type": "textbook.pdf.page.scrolled",
"event": "{\"page\":5,
\"direction\":\"down\",
\"chapter\":\"/foo/Introduction.pdf\",
\"name\":\"textbook.pdf.page.scrolled\"}",
"event_source": "browser",
"context": {
"user_id": 606813,
"org_id": "Philosophy",
"course_id": "Philosophy/LPL/2014",
"path": "/event"
},
"page": "https://class.stanford.edu/courses/Philosophy/LPL/2014/pdfbook/0/?viewer=true&file=/c4x/Philosophy/LPL/asset/Chapter_2_from_LPL_textbook.pdf#zoom=page-fit"
}
{
"username": "...",
"event_type": "book",
"event": "{\"type\":\"gotopage\",
\"old\":4,
\"new\":5,
\"chapter\":\"/c4x/Philosophy/LPL/asset/Introduction.pdf\",
\"name\":\"textbook.pdf.page.loaded\"}",
"event_source": "browser",
"context": {
"user_id": 606813,
"org_id": "Philosophy",
"course_id": "Philosophy/LPL/2014",
"path": "/event"
},
"page": "https://class.stanford.edu/courses/Philosophy/LPL/2014/pdfbook/0/?viewer=true&file=/c4x/Philosophy/LPL/asset/Chapter_2_from_LPL_textbook.pdf#zoom=page-fit"
}
{
"username": "...",
"event_type": "book",
"event": "{\"type\":\"gotopage\",
\"old\":6,
\"new\":7,
\"chapter\":\"/c4x/Philosophy/LPL/asset/Introduction.pdf\",
\"name\":\"textbook.pdf.page.loaded\"}",
"context": {
"user_id": 606813,
"org_id": "Philosophy",
"course_id": "Philosophy/LPL/2014",
"path": "/event"
},
"page": "https://class.stanford.edu/courses/Philosophy/LPL/2014/pdfbook/0/?viewer=true&file=/c4x/Philosophy/LPL/asset/Chapter_2_from_LPL_textbook.pdf#zoom=page-fit"
}
'''
if event is None:
self.logWarn("Track log line %s: missing event text in book event type." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
# Make a dict from the string:
valsDict = self.ensureDict(event)
if valsDict is None:
self.logWarn("Track log line %s: event is not a dict in book event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
bookInteractionType = valsDict.get('name', None)
chapter = valsDict.get('chapter', None)
bookOld = valsDict.get('old', None)
bookNew = valsDict.get('new', None)
page = valsDict.get('page', None)
thumbNailTitle = valsDict.get('thumbnail_title', None)
chapterTitle = valsDict.get('chapter_title', None)
direction = valsDict.get('direction', None)
amount = valsDict.get('amount', None)
caseSensitive = valsDict.get('caseSensitive', None)
query = valsDict.get('query', None)
status = valsDict.get('status', None)
findprevious = valsDict.get('findprevious', None)
highlightAll = valsDict.get('highlightAll', None)
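# Map whichever of the above fields are present onto main-table
# columns, per the mapping table in the docstring. Several source
# fields share a target column (e.g. 'mode', 'sequence_id'), so when
# more than one is present the later assignment wins.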
if bookInteractionType is not None:
self.setValInRow(row, 'book_interaction_type', bookInteractionType)
if chapter is not None:
self.setValInRow(row, 'resource_display_name', chapter)
if bookOld is not None:
self.setValInRow(row, 'goto_from', bookOld)
if bookNew is not None:
self.setValInRow(row, 'goto_dest', bookNew)
if type(page) == int:
self.setValInRow(row, 'goto_from', page)
self.setValInRow(row, 'goto_dest', page)
if thumbNailTitle is not None:
self.setValInRow(row, 'sequence_id', thumbNailTitle)
if chapterTitle is not None:
self.setValInRow(row, 'sequence_id', chapterTitle)
if direction is not None:
self.setValInRow(row, 'mode', direction)
if amount is not None:
self.setValInRow(row, 'mode', amount)
if caseSensitive is not None:
self.setValInRow(row, 'mode', caseSensitive)
if query is not None:
self.setValInRow(row, 'sequence_id', query)
if status is not None:
self.setValInRow(row, 'success', status)
if findprevious is not None:
if findprevious:
self.setValInRow(row, 'mode', 'previous')
else:
self.setValInRow(row, 'mode', 'next')
if highlightAll is not None:
if highlightAll:
self.setValInRow(row, 'mode', 'do_highlight_all')
else:
self.setValInRow(row, 'mode', 'dont_highlight_all')
return row
def handleShowAnswer(self, record, row, event):
'''
Gets an event string like this::
{"problem_id": "i4x://Medicine/HRP258/problem/28b525192c4e43daa148dc7308ff495e"}
'''
if event is None:
self.logWarn("Track log line %s: missing event text in showanswer." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in handle showanswer event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
problem_id = event['problem_id']
except KeyError:
self.logWarn("Track log line %s: showanswer event does not include a problem ID: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
return row
def handleShowHideTranscript(self, record, row, event):
'''
Events look like this::
"{\"id\":\"i4x-Medicine-HRP258-videoalpha-c26e4247f7724cc3bc407a7a3541ed90\",
\"code\":\"q3cxPJGX4gc\",
\"currentTime\":0}"
Same for hide_transcript
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event info in show_transcript or hide_transcript." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in show_transcript or hide_transcript: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
xcriptID = event.get('id', None)
code = event.get('code', None)
self.setValInRow(row, 'transcript_id', xcriptID)
self.setValInRow(row, 'transcript_code', code)
return row
def handleProblemCheckFail(self, record, row, event):
'''
Gets events like this::
{
"failure": "unreset",
"state": {
"student_answers": {
"i4x-Education-EDUC115N-problem-ab38a55d2eb145ae8cec26acebaca27f_2_1": "choice_0"
},
"seed": 89,
"done": true,
"correct_map": {
"i4x-Education-EDUC115N-problem-ab38a55d2eb145ae8cec26acebaca27f_2_1": {
"hint": "",
"hintmode": null,
"correctness": "correct",
"msg": "",
"npoints": null,
"queuestate": null
}
},
"input_state": {
"i4x-Education-EDUC115N-problem-ab38a55d2eb145ae8cec26acebaca27f_2_1": {
}
}
},
"problem_id": "i4x:\/\/Education\/EDUC115N\/problem\/ab38a55d2eb145ae8cec26acebaca27f",
"answers": {
"i4x-Education-EDUC115N-problem-ab38a55d2eb145ae8cec26acebaca27f_2_1": "choice_0"
}
}
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in problem_check_fail event." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in problem_check_fail event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
problem_id = event.get('problem_id', None)
success = event.get('failure', None) # 'closed' or 'unreset'
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
self.setValInRow(row, 'success', success)
answersDict = event.get('answers', None)
stateDict = event.get('state', None)
if isinstance(answersDict, dict) and len(answersDict) > 0:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
if isinstance(stateDict, dict) and len(stateDict) > 0:
stateFKeys = self.pushState(stateDict)
else:
stateFKeys = []
generatedAllRows = False
indexToFKeys = 0
# Generate main table rows that refer to all the
# foreign entries we made above to tables Answer, and State
# We make as few rows as possible by filling in
# columns in all three foreign key entries, until
# we run out of all references:
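# (Same zip-and-pad scheme as in handleProblemCheck() above, minus
# the correctMap foreign keys.)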
while not generatedAllRows:
try:
answerFKey = answersFKeys[indexToFKeys]
except IndexError:
answerFKey = None
try:
stateFKey = stateFKeys[indexToFKeys]
except IndexError:
stateFKey = None
# Have we created rows to cover all answers, and states?
if answerFKey is None and stateFKey is None:
generatedAllRows = True
continue
# Fill in one main table row.
self.setValInRow(row, 'answer_fk', answerFKey if answerFKey is not None else '')
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey] if answerToProblemMap[answerFKey] is not None else ''
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
self.setValInRow(row, 'state_fk', stateFKey if stateFKey is not None else '')
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
indexToFKeys += 1
return row
def handleProblemRescoreFail(self, record, row, event):
'''
No example available. Records reportedly include:
state, problem_id, and failure reason
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event info in problem_rescore_fail." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
problem_id = event.get('problem_id', None)
failure = event.get('failure', None) # 'closed' or 'unreset'
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
self.setValInRow(row, 'failure', failure)
stateDict = event.get('state', None)
if isinstance(stateDict, dict) and len(stateDict) > 0:
stateFKeys = self.pushState(stateDict)
else:
stateFKeys = []
for stateFKey in stateFKeys:
# Fill in one main table row.
self.setValInRow(row, 'state_fk', stateFKey, self.mainTableName)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
return []
def handleProblemRescore(self, record, row, event):
'''
No example available
Fields: state, problemID, orig_score (int), orig_total(int), new_score(int),
new_total(int), correct_map, success (string 'correct' or 'incorrect'), and
attempts(int)
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in problem_rescore event." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in handle problem_rescore event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
problem_id = event.get('problem_id', None)
success = event.get('success', None) # 'correct' or 'incorrect'
attempts = event.get('attempts', None)
orig_score = event.get('orig_score', None)
orig_total = event.get('orig_total', None)
new_score = event.get('new_score', None)
new_total = event.get('new_total', None)
correctMapsDict = event.get('correct_map', None)
# Store the top-level vals in the main table:
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
self.setValInRow(row, 'success', success)
self.setValInRow(row, 'attempts', attempts)
self.setValInRow(row, 'orig_score', orig_score)
self.setValInRow(row, 'orig_total', orig_total)
self.setValInRow(row, 'new_score', new_score)
self.setValInRow(row, 'new_total', new_total)
# And the correctMap, which goes into a different table:
if isinstance(correctMapsDict, dict) and len(correctMapsDict) > 0:
correctMapsFKeys = self.pushCorrectMaps(correctMapsDict)
else:
correctMapsFKeys = []
# Replicate main table row if needed:
for correctMapFKey in correctMapsFKeys:
# Fill in one main table row.
self.setValInRow(row, 'correctMap_fk', correctMapFKey, self.mainTableName)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
return []
def handleSaveProblemFailSuccessCheckOrReset(self, record, row, event):
'''
We do have examples: the event has fields state, problem_id, failure, and answers.
For save_problem_success or save_problem_check there is no failure field.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in save_problem_fail, save_problem_success, or reset_problem_fail." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in handle save_problem_fail, save_problem_success, or reset_problem_fail event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
problem_id = event.get('problem_id', None)
success = event.get('failure', None) # 'closed' or 'unreset'
if success is None:
success = event.get('success', None) # 'incorrect' or 'correct'
self.setValInRow(row, 'problem_id', problem_id)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problem_id)
self.setValInRow(row, 'success', success)
answersDict = event.get('answers', None)
stateDict = event.get('state', None)
if isinstance(answersDict, dict) and len(answersDict) > 0:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
if isinstance(stateDict, dict) and len(stateDict) > 0:
stateFKeys = self.pushState(stateDict)
else:
stateFKeys = []
generatedAllRows = False
indexToFKeys = 0
# Generate main table rows that refer to all the
# foreign entries we made above to tables Answer, and State
# We make as few rows as possible by filling in
# columns in all three foreign key entries, until
# we run out of all references:
while not generatedAllRows:
try:
answerFKey = answersFKeys[indexToFKeys]
except IndexError:
answerFKey = None
try:
stateFKey = stateFKeys[indexToFKeys]
except IndexError:
stateFKey = None
# Have we created rows to cover all answers, and states?
if answerFKey is None and stateFKey is None:
generatedAllRows = True
continue
# Fill in one main table row.
self.setValInRow(row, 'answer_fk', answerFKey if answerFKey is not None else '')
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey] if answerToProblemMap[answerFKey] is not None else ''
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
self.setValInRow(row, 'state_fk', stateFKey if stateFKey is not None else '')
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
indexToFKeys += 1
return []
def handleResetProblem(self, record, row, event):
'''
Events look like this::
{"old_state":
{"student_answers": {"i4x-HMC-MyCS-problem-d457165577d34e5aac6fbb55c8b7ad33_2_1": "choice_2"},
"seed": 811,
"done": true,
"correct_map": {"i4x-HMC-MyCS-problem-d457165577d34e5aac6fbb55c8b7ad33_2_1": {"hint": "",
"hintmode": null,
...
}},
"problem_id": "i4x://HMC/MyCS/problem/d457165577d34e5aac6fbb55c8b7ad33",
"new_state": {"student_answers": {}, "seed": 93, "done": false, "correct_map": {}, "input_state": {"i4x-HMC-MyCS-problem-d457165577d34e5aac6fbb55c8b7ad33_2_1": {}}}}
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in reset_problem." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in handle reset_problem event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
self.setValInRow(row, 'problem_id',event.get('problem_id', ''))
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, event.get('problem_id', ''))
oldStateDict = event.get('old_state', None)
newStateDict = event.get('new_state', None)
stateFKeys = []
if isinstance(oldStateDict, dict) and len(oldStateDict) > 0:
stateFKeys.extend(self.pushState(oldStateDict))
if isinstance(newStateDict, dict) and len(newStateDict) > 0:
stateFKeys.extend(self.pushState(newStateDict))
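# Both the old and the new state contribute State rows; emit one
# main-table row per collected State foreign key.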
for stateFKey in stateFKeys:
# Fill in one main table row.
self.setValInRow(row, 'state_fk', stateFKey if stateFKey is not None else '')
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
return []
def handleRescoreReset(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in rescore-all-submissions or reset-all-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in rescore-all-submissions or reset-all-attempts event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
courseID = event.get('course', '')
if len(courseID) == 0:
self.logWarn("Track log line %s: missing course ID in rescore-all-submissions or reset-all-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
problemID = event.get('problem', '')
if len(problemID) == 0:
self.logWarn("Track log line %s: missing problem ID in rescore-all-submissions or reset-all-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'course_id', courseID)
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
return row
def handleDeleteStateRescoreSubmission(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in delete-student-module-state or rescore-student-submission." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in delete-student-module-state or rescore-student-submission event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
courseID = event.get('course', '')
problemID = event.get('problem', '')
studentID = event.get('student', '')
if len(courseID) == 0:
self.logWarn("Track log line %s: missing course ID in delete-student-module-state or rescore-student-submission." %\
(self.jsonToRelationConverter.makeFileCitation()))
if len(problemID) == 0:
self.logWarn("Track log line %s: missing problem ID in delete-student-module-state or rescore-student-submission." %\
(self.jsonToRelationConverter.makeFileCitation()))
if len(studentID) == 0:
self.logWarn("Track log line %s: missing student ID in delete-student-module-state or rescore-student-submission." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'course_id', courseID)
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
self.setValInRow(row, 'student_id', studentID)
return row
def handleResetStudentAttempts(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in reset-student-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in reset-student-attempt event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
problemID = event.get('problem', '')
studentID = event.get('student', '')
instructorID = event.get('instructor_id', '')
attempts = event.get('old_attempts', -1)
if len(problemID) == 0:
self.logWarn("Track log line %s: missing problem ID in reset-student-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
if len(studentID) == 0:
self.logWarn("Track log line %s: missing student ID in reset-student-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
if len(instructorID) == 0:
self.logWarn("Track log line %s: missing instructorID in reset-student-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
if attempts < 0:
self.logWarn("Track log line %s: missing attempts field in reset-student-attempts." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
self.setValInRow(row, 'student_id', studentID)
self.setValInRow(row, 'instructor_id', instructorID)
self.setValInRow(row, 'attempts', attempts)
return row
def handleGetStudentProgressPage(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in get-student-progress-page." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in get-student-progress-page event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
studentID = event.get('student', None)
instructorID = event.get('instructor_id', None)
if studentID is None:
self.logWarn("Track log line %s: missing student ID in get-student-progress-page." %\
(self.jsonToRelationConverter.makeFileCitation()))
if instructorID is None:
self.logWarn("Track log line %s: missing instructorID in get-student-progress-page." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'student_id', studentID)
self.setValInRow(row, 'instructor_id', instructorID)
return row
def handleAddRemoveInstructor(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in add-instructor or remove-instructor." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in add-instructor or remove-instructor event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
instructorID = event.get('instructor_id', None)
if instructorID is None:
self.logWarn("Track log line %s: missing instrucotrID add-instructor or remove-instructor." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'instructor_id', instructorID)
return row
def handleListForumMatters(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in list-forum-admins, list-forum-mods, or list-forum-community-TAs." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in list-forum-admins, list-forum-mods, or list-forum-community-TAs event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
return row
def handleForumManipulations(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in one of remove-forum-admin, add-forum-admin, " +\
"remove-forum-mod, add-forum-mod, remove-forum-community-TA, or add-forum-community-TA." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in one of handle forum manipulations event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
screen_name = event.get('username', None)
if screen_name is None:
self.logWarn("Track log line %s: missing screen_name in one of remove-forum-admin, add-forum-admin, " +\
"remove-forum-mod, add-forum-mod, remove-forum-community-TA, or add-forum-community-TA." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'screen_name', self.hashGeneral(screen_name))
return row
def handlePsychometricsHistogramGen(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event info in psychometrics-histogram-generation." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in psychometrics-histogram-generation event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
problemID = event.get('problem', None)
if problemID is None:
self.logWarn("Track log line %s: missing problemID in pyschometrics-histogram-generation event." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
return row
def handleAddRemoveUserGroup(self, record, row, event):
'''
This event looks like this::
{"event_name": "beta-tester",
"user": "smith",
"event": "add"}
Note that the 'user' is different from the screen_name. The latter triggered
the event. User is the group member being talked about. For clarity,
'user' is called 'group_user', and 'event' is called 'group_event' in the
main table.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event info add-or-remove-user-group" %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in add-or-remove-user-group event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
eventName = event.get('event_name', None)
user = event.get('user', None)
event = event.get('event', None)
if eventName is None:
self.logWarn("Track log line %s: missing event_name in add-or-remove-user-group." %\
(self.jsonToRelationConverter.makeFileCitation()))
if user is None:
self.logWarn("Track log line %s: missing user field in add-or-remove-user-group." %\
(self.jsonToRelationConverter.makeFileCitation()))
if event is None:
self.logWarn("Track log line %s: missing event field in add-or-remove-user-group." %\
(self.jsonToRelationConverter.makeFileCitation()))
self.setValInRow(row, 'event_name', eventName)
self.setValInRow(row, 'group_user', user)
self.setValInRow(row, 'group_action', event)
return row
def handleCreateAccount(self, record, row, event):
'''
Get event structure like this (fictitious values)::
"{\"POST\": {\"username\": [\"luisXIV\"],
\"name\": [\"Roy Luigi Cannon\"],
\"mailing_address\": [\"3208 Dead St\\r\\nParis, GA 30243\"],
\"gender\": [\"f\"],
\"year_of_birth\": [\"1986\"],
\"level_of_education\": [\"p\"],
\"goals\": [\"flexibility, cost, 'prestige' and course of study\"],
\"honor_code\": [\"true\"],
\"terms_of_service\": [\"true\"],
\"course_id\": [\"Medicine/HRP258/Statistics_in_Medicine\"],
\"password\": \"********\",
\"enrollment_action\": [\"enroll\"],
\"email\": [\"luig.cannon@yahoo.com\"]}, \"GET\": {}}"
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
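# Illustrative sketch (added commentary, not from the original author):
# given the fictitious POST payload in the docstring above, the code below
# would roughly see:
#
#   postDict = {'username': ['luisXIV'],
#               'year_of_birth': ['1986'],
#               'mailing_address': ['3208 Dead St\r\nParis, GA 30243'],
#               ...}
#
# Most values arrive as single-element lists; they are flattened to their
# first element in the loop over accountDict.keys() further down, so e.g.
# accountDict['year_of_birth'] ends up as the int 1986 after conversion.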
if event is None:
self.logWarn("Track log line %s: missing event text in event type create_account." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
try:
# From {\"POST\": {\"username\": ... , \"GET\": {}}
# get the inner dict, i.e. the value of 'POST':
# Like this:
# {'username': ['luisXIV'],
# 'mailing_address': ['3208 Dead St\r\nParis, GA 30243'],
# ...
# }
postDict = event['POST']
except Exception as e:
self.logWarn("Track log line %s: event is not a dict in create_account event: '%s' (%s)" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event), `e`))
return row
# Get the POST field's entries into an ordered
# dict as expected by pushAccountInfo():
accountDict = OrderedDict()
accountDict['account_id'] = None # filled in by pushAccountInfo()
userScreenName = postDict.get('username', '')
accountDict['screen_name'] = userScreenName
accountDict['name'] = postDict.get('name', '')
if isinstance(userScreenName, list):
userScreenName = userScreenName[0]
accountDict['anon_screen_name'] = self.hashGeneral(userScreenName)
accountDict['mailing_address'] = postDict.get('mailing_address', '')
# Mailing addresses are enclosed in brackets, making them
# an array. Pull the addr string out:
mailAddr = accountDict['mailing_address']
if isinstance(mailAddr, list):
mailAddr = mailAddr[0]
accountDict = self.getZipAndCountryFromMailAddr(mailAddr, accountDict)
else:
accountDict['zipcode'] = ''
accountDict['country'] = ''
# Make sure that zip code is null unless address is USA:
if accountDict['country'] != 'USA':
accountDict['zipcode'] = ''
accountDict['gender'] = postDict.get('gender', '')
accountDict['year_of_birth'] = postDict.get('year_of_birth', -1)
accountDict['level_of_education'] = postDict.get('level_of_education', '')
accountDict['goals'] = postDict.get('goals', '')
accountDict['honor_code'] = postDict.get('honor_code', -1)
accountDict['terms_of_service'] = postDict.get('terms_of_service', -1)
accountDict['course_id'] = postDict.get('course_id', '')
accountDict['enrollment_action'] = postDict.get('enrollment_action', '')
accountDict['email'] = postDict.get('email', '')
accountDict['receive_emails'] = postDict.get('receive_emails', '')
# Some values in create_account are arrays. Replace those
# values' entries in accountDict with the arrays' first element:
for fldName in accountDict.keys():
if isinstance(accountDict[fldName], list):
accountDict[fldName] = accountDict[fldName][0]
# Convert some values into more convenient types
# (that conform to the SQL types we declared in
# self.schemaAccountTbl:
try:
accountDict['year_of_birth'] = int(accountDict['year_of_birth'])
except:
accountDict['year_of_birth'] = 0
try:
accountDict['terms_of_service'] = 1 if accountDict['terms_of_service'] == 'true' else 0
except:
pass
try:
accountDict['honor_code'] = 1 if accountDict['honor_code'] == 'true' else 0
except:
pass
# Escape single quotes and CR/LFs in the various fields, so that MySQL won't throw up.
# Also replace newlines with ", ":
if len(accountDict['goals']) > 0:
accountDict['goals'] = self.makeInsertSafe(accountDict['goals'])
if len(accountDict['screen_name']) > 0:
accountDict['screen_name'] = self.makeInsertSafe(accountDict['screen_name'])
if len(accountDict['name']) > 0:
accountDict['name'] = self.makeInsertSafe(accountDict['name'])
if len(accountDict['mailing_address']) > 0:
accountDict['mailing_address'] = self.makeInsertSafe(accountDict['mailing_address'])
# Get the (only) Account table foreign key.
# Returned in an array for conformance with the
# other push<TableName>Info()
self.pushAccountInfo(accountDict)
return row
def handleCourseEnrollActivatedDeactivated(self, record, row, event):
'''
Handles events edx_course_enrollment_activated, and edx_course_enrollment_deactivated.
Checks the context field. If it contains a 'path' field, then
its value is placed in the 'page' column.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
event = self.ensureDict(event)
if event is None:
self.logWarn("Track log line %s: event is not a dict in edx_course_enrollment_(de)activated event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
self.setValInRow(row, 'mode', event.get('mode', None))
self.setValInRow(row, 'session', event.get('session', None))
if self.currContext is not None:
pathToUiButton = self.currContext.get('path', None)
self.setValInRow(row, 'page', pathToUiButton)
return row
def handleCourseEnrollUpgradeOrSucceeded(self, record, row, event):
'''
Handles events edx_course_enrollment_upgrade_clicked, and
edx_course_enrollment_upgrade_succeeded, and edx_course_enrollment_deactivated.
Checks the context field. If it contains a 'mode' field, then
its value is placed in the 'mode' column.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if self.currContext is not None:
pathToUiButton = self.currContext.get('mode', None)
self.setValInRow(row, 'mode', pathToUiButton)
return row
def handleProblemGraded(self, record, row, event):
'''
Events look like this::
'[...#8217;t improve or get worse. Calculate the 95% confidence interval for the true proportion of heart disease patients who improve their fitness using this particular exercise regimen. Recall that proportions are normally distributed with a standard error of </p><p>\\\\[ \\\\sqrt{\\\\frac{p(1-p)}{n}} \\\\]</p><p>(You may use the observed proportion to calculate the standard error.)</p><span><form class=\\\"choicegroup capa_inputtype\\\" id=\\\"inputtype_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\"><div class=\\\"indicator_container\\\">\\n </div><fieldset><label for=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_0\\\"><input type=\\\"radio\\\" name=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" id=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_0\\\" aria-describedby=\\\"answer_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" value=\\\"choice_0\\\"/> 66%\\n\\n </label><label for=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_1\\\"><input type=\\\"radio\\\" name=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" id=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_1\\\" aria-describedby=\\\"answer_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" value=\\\"choice_1\\\"/> 66%-70%\\n\\n </label><label for=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_2\\\" class=\\\"choicegroup_correct\\\"><input type=\\\"radio\\\" name=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" id=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_2\\\" aria-describedby=\\\"answer_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" value=\\\"choice_2\\\" checked=\\\"true\\\"/> 50%-84%\\n\\n \\n <span class=\\\"sr\\\" aria-describedby=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_2\\\">Status: correct</span>\\n </label><label for=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1_choice_3\\\"><input type=\\\"radio\\\" name=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_1\\\" id=\\\"input_i4x-Medicine-HRP258-problem-fc217b7c689a40938dd55ebc44cb6f9a_4_...
]'
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
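# Illustrative sketch (added commentary; the exact regex lives in
# problemGradedComplexPattern and its details are an assumption here):
# the matching below is intended to reduce the HTML blob in the event to
# pairs like
#
#   answersDict = {'i4x-Medicine-HRP258-problem-fc21..._4_1': 'correct'}
#
# i.e. one problem ID (match group 1) mapped to its correctness string
# (match group 2), which pushAnswers() then writes to the Answer table.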
if event is None:
self.logWarn("Track log line %s: missing event text in save_problem_fail, save_problem_success, or reset_problem_fail." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
answersDict = {}
# The following will go through the mess, and
# pull out all pairs problemID/(in)correct. Those
# will live in each Match obj's group(1) and group(2)
# respectively:
probIdCorrectIterator = EdXTrackLogJSONParser.problemGradedComplexPattern.finditer(str(event))
if probIdCorrectIterator is None:
# Should have found at least one probID/correctness pair:
self.logWarn("Track log line %s: could not parse out problemID/correctness pairs from '%s'. (stuffed into badlyFormatted)" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
self.setValInRow(row, 'badly_formatted', str(event))
return row
# Go through each match:
for searchMatch in probIdCorrectIterator:
answersDict[searchMatch.group(1)] = searchMatch.group(2)
if len(answersDict) > 0:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
else:
answersFKeys = []
if len(answersFKeys) > 0:
# Now need to generate enough near-replicas of event
# entries to cover all answer
# foreign key entries that were created:
for answerFKey in answersFKeys:
# Fill in one main table row.
self.setValInRow(row, 'answer_fk', answerFKey)
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey]
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
# Return empty row, b/c we already pushed all necessary rows:
return []
def handleReceiveEmail(self, record, row, event):
'''
Event is something like this::
{"course": "Medicine/SciWrite/Fall2013", "receive_emails": "yes"}
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event type change-email-settings." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
accountDict = self.ensureDict(event)
if accountDict is None:
self.logWarn("Track log line %s: event is not a dict in change-email-settings event: '%s' (%s)" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
course_id = accountDict.get('course', None)
receive_emails = accountDict.get('receive_emails', None)
screen_name = record.get('username', None)
# Get the event fields and put them in their place:
# dict as expected by pushAccountInfo():
accountDict = OrderedDict()
accountDict['account_id'] = None # filled in by pushAccountInfo()
accountDict['anon_screen_name'] = self.hashGeneral(screen_name)
accountDict['name'] = None
accountDict['mailing_address'] = None
mailAddr = accountDict['mailing_address']
if mailAddr is not None:
# Mailing addresses are enclosed in brackets, making them
# an array. Pull the addr string out:
if isinstance(mailAddr, list):
mailAddr = mailAddr[0]
accountDict = self.getZipAndCountryFromMailAddr(mailAddr, accountDict)
else:
accountDict['zipcode'] = None
accountDict['country'] = None
accountDict['gender'] = None
accountDict['year_of_birth'] = None
accountDict['level_of_education'] = None
accountDict['goals'] = None
accountDict['honor_code'] = None
accountDict['terms_of_service'] = None
accountDict['course_id'] = course_id
accountDict['enrollment_action'] = None
accountDict['email'] = None
accountDict['receive_emails'] = receive_emails
# TODO (unresolved in original): decide whether this accountDict should be pushed to the Account table here.
return row
def handleABExperimentEvent(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event text in event type assigned_user_to_partition or child_id." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
eventDict = self.ensureDict(event)
if eventDict is None:
self.logWarn("Track log line %s: event is not a dict in assigned_user_to_partition or child_id event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
if len(row) < 1:
self.logWarn("Track log line %s: encountered empty partial row while processing assigned_user_to_partition or child_id: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
eventType = record['event_type']
except KeyError:
eventType = None
# Give the ABExperiment table row we are constructing
# the same key as the current row in EdxTrackEvent:
currEventRowId = row[0]
abExpDict = OrderedDict()
abExpDict['event_table_id'] = currEventRowId
abExpDict['event_type'] = eventType
abExpDict['anon_screen_name'] = self.getValInRow(row, 'anon_screen_name')
abExpDict['group_id'] = eventDict.get('group_id', -1)
abExpDict['group_name'] = eventDict.get('group_name', '')
abExpDict['partition_id'] = eventDict.get('partition_id', -1)
abExpDict['partition_name'] = eventDict.get('partition_name', '')
abExpDict['child_module_id'] = eventDict.get('child_id', '')
abExpDict['resource_display_name'] = self.findModuleNameInEventContext(record)
abExpDict['cohort_id'] = eventDict.get('cohort_id', -1)
abExpDict['cohort_name'] = eventDict.get('cohort_name', '')
abExpDict['course_display_name'] = self.currCourseID
if not eventType.endswith('assigned_user_to_partition'):
context = eventDict.get('context', {})
abTestInfo = context.get('course_user_tags', None)
if abTestInfo is not None:
abTestInfoDict = self.ensureDict(abTestInfo)
if len(abTestInfoDict) > 0:
# The dict contains one key/value pair: the key is
# the name of the partition that a learner is in.
# The value is the learner's groupID:
tagsKeys = abTestInfoDict.keys()
if len(tagsKeys) > 0:
abExpDict['partition_name'] = tagsKeys[0]
abExpDict['group_id'] = abTestInfoDict[tagsKeys[0]]
if len(tagsKeys) > 1:
self.logWarn("ABExperiment related event with course_user_tags length > 1; ignoring all but first key/val pair.")
if eventType.endswith('child_render'):
abExpDict['child_module_id'] = childId = eventDict.get('child-id', '')
if len(childId) == 0:
# 'child-id' is the documented field name. But EdX likes to change
# hyphens to underscores, so in case they ever do:
abExpDict['child_module_id'] = childId = eventDict.get('child_id', '')
try:
moduleDisplayName = self.findResourceDisplayName(childId)
except TypeError:
# A type error was seen once during a transform; unsure how
# that comes about. But this guards against them:
self.logWarn("Event %s should have child-id field but has '%s'" % (eventType,childId))
moduleDisplayName = ''
if len(moduleDisplayName) > 0:
abExpDict['resource_display_name'] = moduleDisplayName
self.pushABExperimentInfo(abExpDict)
return row
def addEventToABExperiment(self, eventTableId, eventType, contextDict):
'''
[This method is currently not used, and likely won't in the future]
Called when an event concerns a learner that has been
assigned to an AB experiment. The entry into the EdxTrackEvent
table is handled elsewhere. This method just adds info about that
event to the ABExperiment table. That table's first column
is a pointer into the EdxTrackEvent table; so that pt must
be passed in.
:param eventTableId: reference to _id of row in EdxTrackEvent table that records the main info about the event
:type eventTableId: string
:param eventType: event type of event being noted in ABExperiment
:type eventType: string
:param contextDict: the context dict from within the event's JSON structure
:type contextDict: {}
'''
if contextDict is None:
self.logWarn("Track log line %s: missing context field in event that concerns learner in AB Experiment: " %\
(self.jsonToRelationConverter.makeFileCitation()))
return
# Ensure that the passed-in context dict includes the
# required course_user_tags dict:
courseUserTagsDict = contextDict.get('course_user_tags', None)
if courseUserTagsDict is None:
self.logWarn("Track log line %s: missing course_user_tags in context field in event that concerns learner in AB Experiment: " %\
(self.jsonToRelationConverter.makeFileCitation()))
return
abTestInfoDict = self.ensureDict(courseUserTagsDict)
try:
resourceDisplayName = contextDict['module']['display_name']
except:
resourceDisplayName = ''
# Build an empty ABExperiment row,
# except for the EdxTrackEvent pointer:
abExpDict = OrderedDict()
abExpDict['event_table_id'] = eventTableId
abExpDict['event_type'] = eventType
abExpDict['anon_screen_name'] = ''
abExpDict['group_id'] = -1
abExpDict['group_name'] = ''
abExpDict['partition_id'] = -1
abExpDict['partition_name'] = ''
abExpDict['child_module_id'] = ''
abExpDict['resource_display_name'] = resourceDisplayName
abExpDict['cohort_id'] = -1
abExpDict['cohort_name'] = ''
abExpDict['course_display_name'] = ''
if len(abTestInfoDict) > 0:
# The dict contains key/value pairs: that can
# be course_id, user_id, partition_id, group_id,
# group_name, partition_name. Pick out the ones
# we want in the ABExperiment rows:
abExpDict['partition_name'] = abTestInfoDict.get('partition_name', '')
abExpDict['partition_id'] = abTestInfoDict.get('partition_id', -1)
abExpDict['group_name'] = abTestInfoDict.get('group_name', '')
abExpDict['group_id'] = abTestInfoDict.get('group_id', -1)
self.pushABExperimentInfo(abExpDict)
def handleOpenAssessmentEvent(self, record, row, event):
if event is None:
self.logWarn("Track log line %s: missing event text in one of the openassessment event types." %\
(self.jsonToRelationConverter.makeFileCitation()))
return row
eventDict = self.ensureDict(event)
if eventDict is None:
self.logWarn("Track log line %s: event is not a dict in one of the openassessment events: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
if len(row) < 1:
self.logWarn("Track log line %s: encountered empty partial row while processing an openassessment event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
eventType = record['event_type']
except KeyError:
# Can't really happen, b/c event_type is what triggers call
# to this method. But...:
self.logWarn("Track log line %s: encountered openassessment event event without event_type: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Give the OpenAssessment table row we are constructing
# the same key as the current row in EdxTrackEvent:
currEventRowId = row[0]
openAssessmentDict = OrderedDict()
openAssessmentDict['event_table_id'] = currEventRowId
openAssessmentDict['event_type'] = eventType
openAssessmentDict['anon_screen_name'] = self.getValInRow(row, 'anon_screen_name')
openAssessmentDict['score_type'] = ''
openAssessmentDict['submission_uuid'] = ''
openAssessmentDict['edx_anon_id'] = ''
openAssessmentDict['time'] = self.getValInRow(row, 'time')
openAssessmentDict['time_aux'] = ''
openAssessmentDict['course_display_name'] = self.getValInRow(row, 'course_display_name')
openAssessmentDict['resource_display_name'] = ''
openAssessmentDict['resource_id'] = ''
openAssessmentDict['submission_text'] = ''
openAssessmentDict['feedback_text'] = ''
openAssessmentDict['comment_text'] = ''
openAssessmentDict['attempt_num'] = -1
openAssessmentDict['options'] = ''
openAssessmentDict['corrections'] = ''
openAssessmentDict['points'] = ''
# Fill in the above columns depending on the type of
# openassessment event:
if eventType == 'openassessmentblock.get_peer_submission':
openAssessmentDict['course_display_name'] = eventDict.get('course_id','')
openAssessmentDict['resource_id'] = eventDict.get('item_id','')
openAssessmentDict['submission_uuid'] = eventDict.get('submission_returned_uuid','')
openAssessmentDict['edx_anon_id'] = eventDict.get('requesting_student_id','')
elif eventType in ['openassessmentblock.peer_assess', 'openassessmentblock.self_assess']:
openAssessmentDict['edx_anon_id'] = eventDict.get('scorer_id','')
openAssessmentDict['comment_text'] = eventDict.get('feedback','')
openAssessmentDict['submission_uuid'] = eventDict.get('submission_uuid','')
openAssessmentDict['score_type'] = eventDict.get('score_type','')
partsArray = eventDict.get('parts','')
openAssessmentDict['feedback_text'] = self.makeAssessmentText(partsArray)
try:
# The rubric's content hash is part of the event itself:
rubricDict = eventDict['rubric']
rubric = rubricDict['content_hash']
openAssessmentDict['options'] = rubric
except (KeyError, TypeError):
pass
openAssessmentDict['time'] = eventDict.get('scored_at', '')
elif eventType == 'openassessmentblock.submit_feedback_on_assessments':
openAssessmentDict['submission_uuid'] = eventDict.get('submission_uuid','')
openAssessmentDict['feedback_text'] = eventDict.get('feedback_text','')
# checkbox options that learner selected to evaluate:
checkboxOptions = eventDict.get('options','')
if type(checkboxOptions) == list:
openAssessmentDict['options'] = str(checkboxOptions)
elif eventType == 'openassessment.student_training_assess_example':
openAssessmentDict['submission_uuid'] = eventDict.get('submission_uuid','')
correctionsDict = eventDict.get('corrections', None)
openAssessmentDict['corrections'] = self.makeCorrectionsText(correctionsDict, 'Instructor choices in rubric: ')
openAssessmentDict['options'] = self.makeCriterionChoiceText(eventDict.get('options_selected', None), 'Student choices in rubric: ')
elif eventType == 'openassessment.create_submission':
# The 'answer' field is a dict w/ flds 'text' and 'file_upload_key'
# We ignore the file upload key:
try:
openAssessmentDict['submission_text'] = eventDict['answer']['text']
except (KeyError, TypeError):
pass
openAssessmentDict['time'] = eventDict.get('created_at','')
openAssessmentDict['attempt_num'] = eventDict.get('attempt_number','')
openAssessmentDict['submission_uuid'] = eventDict.get('submission_uuid','')
openAssessmentDict['time_aux'] = eventDict.get('submitted_at','')
elif (eventType == 'openassessment.save_submission') or\
(eventType == 'openassessmentblock.save_submission'):
# The 'saved_response' field is a dict w/ flds 'text' and 'file_upload_key'
# We ignore the file upload key:
try:
openAssessmentDict['submission_text'] = eventDict['saved_response']['text']
except (KeyError, TypeError):
pass
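# NOTE (added commentary): the following elif tests the same
# 'openassessment.student_training_assess_example' event type that is
# already handled in a branch above, so as written this block is
# unreachable.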
elif eventType == 'openassessment.student_training_assess_example':
try:
openAssessmentDict['corrections'] = self.makeTrainingAssessExample(eventDict['corrections'], AssessmentOptionSource.INSTRUCTOR)
except (KeyError, TypeError):
pass
openAssessmentDict['submission_uuid'] = eventDict.get('submission_uuid','')
try:
openAssessmentDict['submission_text'] = self.makeTrainingAssessExample(eventDict['options_selected'], AssessmentOptionSource.LEARNER)
except (KeyError, TypeError):
pass
elif eventType == 'openassessment.upload_file':
fileName = eventDict.get('fileName', '')
fileType = eventDict.get('fileType', '')
fileSize = eventDict.get('fileSize', '')
openAssessmentDict['resource_display_name'] = 'File %s: %s (%s)' % (fileName, fileType, fileSize)
resource_id = openAssessmentDict['resource_id']
if len(resource_id) > 0:
displayName = self.findResourceDisplayName(resource_id)
openAssessmentDict['resource_display_name'] = '' if displayName is None else displayName
if len(openAssessmentDict['resource_display_name']) == 0:
displayName = self.findModuleNameInEventContext(record)
openAssessmentDict['resource_display_name'] = '' if displayName is None else displayName
self.pushOpenAssessmentInfo(openAssessmentDict)
return row
def makeAssessmentText(self, peerOrSelfAssessPartsField):
'''
Takes the 'parts' field array of openassessmentblock.peer_assess and openassessmentblock.self_assess
events, and returns a single, reasonably formulated string.
The parameter is an array of triplets: criterion, option, and feedback.
The criterion contains 'points_possible' and 'name', which is presumably
the rubric name. The 'option' is the rubric option a learner chose,
and 'feedback' is text.
Example substring of the event's 'part' field:
"parts": [{"criterion": {"points_possible": 3,
"name": "1"},
"option": {"points": 3,
"name": "3"},
"feedback": ""},
{"criterion": {"points_possible": 3,
"name": "0"},
"option": {"points": 2,
"name": "2"},
"feedback": ""},
{"criterion": {"points_possible": 3,
"name": "Content"},
"option": {"points": 3,
"name": "Excellent"},
"feedback": ""},
{"criterion": {"points_possible": 3,
"name": "2"},
"option": {"points": 3,
"name": "3"},
"feedback": ""},
{"criterion": {"points_possible": 3,
"name": "Ideas"},
"option": {"points": 2,
"name": "Good"},
"feedback": ""}
]
:param peerOrSelfAssessPartsField: All information about one feedback by one or more rubrics
:type peerOrSelfAssessPartsField: [{int,string}, string, string]
:returns string that summarizes all entries
:rtype string
'''
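# Illustrative example (added commentary): with the docstring's last
# 'parts' entry,
#
#   parts = [{'criterion': {'points_possible': 3, 'name': 'Ideas'},
#             'option': {'points': 2, 'name': 'Good'},
#             'feedback': ''}]
#
# the loop below yields:
#
#   "Criterion Ideas (max points 3): option Good: 2. Feedback: ; "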
resTxt = ''
if peerOrSelfAssessPartsField is None:
return resTxt
if type(peerOrSelfAssessPartsField) != list:
self.logWarn("Track log line %s: parts field of openassessmentblock.peer_assess or openassessmentblock.self_assess not an array: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(peerOrSelfAssessPartsField)))
for partsStruct in peerOrSelfAssessPartsField:
# partsStruct is something like:
# {'criterion': {'points_possible': 3, 'name': '1'}, 'option': {'points': 3, 'name': '3'}, 'feedback': ''}
pointsPossible = criterionName = optionPoints = optionName = feedback = ''
# The TypeError catching below is to protect
# against the inner getitem returning null:
try:
pointsPossible = partsStruct['criterion']['points_possible']
except (KeyError,TypeError):
pass
try:
criterionName = partsStruct['criterion']['name']
except (KeyError,TypeError):
pass
try:
optionPoints = partsStruct['option']['points']
except (KeyError,TypeError):
pass
try:
optionName = partsStruct['option']['name']
except (KeyError,TypeError):
pass
try:
feedback = partsStruct['feedback']
except (KeyError,TypeError):
pass
val = 'Criterion %s (max points %s): option %s: %s. Feedback: %s; ' % (criterionName,pointsPossible,optionName, optionPoints, feedback)
resTxt += val
return resTxt
def makeTrainingAssessExample(self, trainingResultDict, learnerSrcOrInstructorSrc):
'''
When peer graders are trained, their results are in
event openassessment.student_training_assess_example.
This method accepts one field of that event, a dict
{criterion_name: option_name}.
We create a single string of these values. Example:
"options_selected": {"1": "3", "0": "2", "Content": "Good", "2": "3", "Ideas": "0"}
The trainingResultDict passed in would just be the dict, without
the "options_selected".
:param trainingResultDict:
:type trainingResultDict:
:param learnerSrcOrInstructorSrc: whether dict was from instructor or from learner
:type learnerSrcOrInstructorSrc: AssessmentOptionSource
'''
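# Illustrative example (added commentary): with the docstring's
# options_selected dict and learnerSrcOrInstructorSrc set to
# AssessmentOptionSource.LEARNER, the entry {"Content": "Good"} contributes
#
#   "Criterion Content's learner-chosen option: Good;"
#
# to the returned string (dict iteration order determines the overall order).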
if type(trainingResultDict) != dict:
return ''
resTxt = ''
for criterionName in trainingResultDict.keys():
try:
criterionValue = trainingResultDict[criterionName]
if learnerSrcOrInstructorSrc == AssessmentOptionSource.INSTRUCTOR:
resTxt += "Criterion %s's instructor-chosen option: %s;" % (criterionName, criterionValue)
else:
resTxt += "Criterion %s's learner-chosen option: %s;" % (criterionName, criterionValue)
except (KeyError, TypeError):
pass
return resTxt
def makeCorrectionsText(self, correctionsDict, preamble=''):
'''
In openassessment.student_training_assess_example events, the
corrections field contains a dict whose keys name a rubric
criterion, and values contain the instructor defined correct choices
for that criterion. Return a string suitable for placing in the 'corrections'
column.
:param correctionsDict: dict containing criterion name/instructor truth-choice
:type correctionsDict: {string : string}
:return string listing criterion option choices suitable for placement in 'corrections' column. If
passed-in optionsDict is None or empty, an empty string is returned.
'''
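# Illustrative example (added commentary):
#
#   makeCorrectionsText({'Ideas': '2'}, 'Instructor choices in rubric: ')
#   # -> "Instructor choices in rubric: Criterion Ideas: 2; "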
if correctionsDict is None or len(correctionsDict) == 0:
return ''
resTxt = preamble
for correctionCriterion in correctionsDict.keys():
resTxt += "Criterion %s: %s; " % (correctionCriterion, correctionsDict[correctionCriterion])
return resTxt
def makeCriterionChoiceText(self, optionsDict, preamble=''):
'''
In openassessment.student_training_assess_example the
'options_selected' dict contains key/value pairs in which
the key names a criterion in a rubric, and value contains
a learner's choice in that rubric criterion. Return a
string suitable for placing in the 'options' column.
:param optionsDict: dict containing criterionName/chosenValue pairs
:type optionsDict: {string : string}
:param preamble: any text to put in front of the automatically generated result string
:type preamble: string
:return string listing criterion option choices suitable for placement in 'options' column. If
passed-in optionsDict is None or empty, an empty string is returned.
:rtype string
'''
if optionsDict is None or len(optionsDict) == 0:
return ''
resTxt = preamble
for criterion in optionsDict.keys():
resTxt += "Criterion %s: %s; " % (criterion, optionsDict[criterion])
return resTxt
def handlePathStyledEventTypes(self, record, row, event):
'''
Called when an event type is a long path-like string.
Examples::
/courses/OpenEdX/200/Stanford_Sandbox/modx/i4x://OpenEdX/200/combinedopenended/5fb3b40e76a14752846008eeaca05bdf/check_for_score
/courses/Education/EDUC115N/How_to_Learn_Math/modx/i4x://Education/EDUC115N/peergrading/ef6ba7f803bb46ebaaf008cde737e3e9/is_student_calibrated",
/courses/Education/EDUC115N/How_to_Learn_Math/courseware
Most have an action verb at the end; some don't. Those without one
carry no additional information, so we drop those events.
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
'''
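# Illustrative sketch (added commentary): the dispatch below keys off the
# last path component of event_type, e.g. for the docstring's examples:
#
#   '/courses/OpenEdX/200/.../check_for_score'.split('/')[-1]
#   # -> 'check_for_score'        (no extra info; row returned unchanged)
#   '/courses/Education/EDUC115N/.../is_student_calibrated'.split('/')[-1]
#   # -> 'is_student_calibrated'  (handled by subHandleIsStudentCalibrated)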
if event is None:
self.logWarn("Track log line %s: missing event text in event %s." %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Interesting info is hidden in the event_type field of this
# type of record: the embedded hash string corresponds to a
# sometimes illuminating entry in the modulestore's 'metadata.display_name'
# field. We use our ModulestoreMapper instance self.hashMapper to
# get that information, and insert it in the resource_display_name field
# of the edXTrackEvent table (setResourceDisplayName() does nothing if
# given a None, so the call is safe):
self.setResourceDisplayName(row, record.get('event_type', None))
eventDict = self.ensureDict(event)
if eventDict is None:
self.logWarn("Track log line %s: event is not a dict in path-styled event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
postDict = eventDict['POST']
except KeyError:
self.logWarn("Track log line %s: event in path-styled event is not GET styled: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
# Grab the 'verb' at the end, if there is one:
eventType = record['event_type']
if eventType is None or not isinstance(eventType, basestring):
return row
pieces = eventType.split('/')
verb = pieces[-1]
if verb == 'is_student_calibrated':
return self.subHandleIsStudentCalibrated(row, postDict)
elif verb == 'goto_position':
return self.subHandleGotoPosition(row, postDict)
elif verb == 'get_last_response':
# No additional info to get
return row
elif verb == 'problem':
return self.subHandleProblem(row, postDict)
elif verb == 'save_answer':
return self.subHandleSaveAnswer(row, postDict)
elif verb == 'check_for_score':
# No additional info to get
return row
elif verb == 'problem_get':
# No additional info to get
return row
elif verb == 'get_legend':
# No additional info to get
return row
elif verb == 'problem_show':
# No additional info to get
return row
elif verb == 'problem_check':
return self.subHandleProblemCheckInPath(row, postDict)
elif verb == 'save_grade':
return self.subHandleSaveGrade(row, postDict)
else:
# Unrecognized trailing verb: nothing more to extract.
return row
def handleForumEvent(self, record, row, event):
eventDict = self.ensureDict(event)
if eventDict is None:
self.logWarn("Track log line %s: event is not a dict in path-styled event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
self.setValInRow(row, 'submission_id', str(eventDict.get('query', None)))
self.setValInRow(row, 'page', str(eventDict.get('page', None)))
self.setValInRow(row, 'success', str(eventDict.get('total_results', None)))
return row
def subHandleIsStudentCalibrated(self, row, eventDict):
'''
Called from handlePathStyledEventTypes(). Event dict looks like this::
{\"location\": [\"i4x://Education/EDUC115N/combinedopenended/0d67667941cd4e14ba29abd1542a9c5f\"]}, \"GET\": {}"
The caller is expected to have verified the legitimacy of EventDict
:param row:
:type row:
:param eventDict:
:type eventDict:
'''
# Get location:
try:
location = eventDict['location']
except KeyError:
self.logWarn("Track log line %s: no location field provided in is_student_calibrated event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
try:
# The 'location' is an array of strings. Turn them into one string:
location = '; '.join(location)
self.setValInRow(row, 'question_location', location)
except TypeError:
self.logWarn("Track log line %s: location field provided in is_student_calibrated event contains a non-string: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
return row
def subHandleGotoPosition(self, row, eventDict):
'''
Called from handlePathStyledEventTypes(). Event dict looks like this::
{\"position\": [\"2\"]}, \"GET\": {}}"
The caller is expected to have verified the legitimacy of EventDict
:param row:
:type row:
:param eventDict:
:type eventDict:
'''
# Get location:
try:
position = eventDict['position']
except KeyError:
self.logWarn("Track log line %s: no position field provided in got_position event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
try:
# The 'position' is an array of strings. Turn them into one string:
position = '; '.join(position)
self.setValInRow(row, 'position', position)
except TypeError:
self.logWarn("Track log line %s: position field provided in goto_position event contains a non-string: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
return row
def subHandleProblem(self, row, eventDict):
'''
Called from handlePathStyledEventTypes(). Event dict looks like this::
{\"location\": [\"i4x://Education/EDUC115N/combinedopenended/0d67667941cd4e14ba29abd1542a9c5f\"]}, \"GET\": {}}"
The caller is expected to have verified the legitimacy of EventDict
:param row:
:type row:
:param eventDict:
:type eventDict:
'''
# Get location:
try:
location = eventDict['location']
except KeyError:
self.logWarn("Track log line %s: no location field provided in is_student_calibrated event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
try:
# The 'location' is an array of strings. Turn them into one string:
location = '; '.join(location)
self.setValInRow(row, 'question_location', location)
except TypeError:
self.logWarn("Track log line %s: location field provided in is_student_calibrated event contains a non-string: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
return row
return row
def subHandleSaveAnswer(self, row, eventDict):
'''
Called from handlePathStyledEventTypes(). Event dict looks like this::
{\"student_file\": [\"\"],
\"student_answer\": [\"Students will have to use higher level thinking to describe the...in the race. \"],
\"can_upload_files\": [\"false\"]}, \"GET\": {}}"
The caller is expected to have verified the legitimacy of EventDict
:param row:
:type row:
:param eventDict:
:type eventDict:
'''
student_file = eventDict.get('student_file', [''])
student_answer = eventDict.get('student_answer', [''])
can_upload_file = eventDict.get('can_upload_files', [''])
# All three values are arrays. Turn them each into a semicolon-
# separated string:
try:
student_file = '; '.join(student_file)
except TypeError:
self.logWarn("Track log line %s: student_file field provided in save_answer event contains a non-string: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
student_file = ''
self.setValInRow(row, 'student_file', student_file)
try:
student_answer = '; '.join(student_answer)
# Ensure escape of comma, quotes, and CR/LF:
student_answer = self.makeInsertSafe(student_answer)
except TypeError:
self.logWarn("Track log line %s: student_answer field provided in save_answer event contains a non-string: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
student_answer = ''
self.setValInRow(row, 'long_answer', student_answer)
try:
can_upload_file = '; '.join(can_upload_file)
except TypeError:
#self.logWarn("Track log line %s: can_upload_file field provided in save_answer event contains a non-string: '%s'" %\
# (self.jsonToRelationConverter.makeFileCitation(), str(eventDict)))
can_upload_file = str(can_upload_file)
self.setValInRow(row, 'can_upload_file', can_upload_file)
return row
def subHandleSaveGrade(self, row, postDict):
'''
Get something like::
"{\"POST\": {\"submission_id\": [\"60611\"],
\"feedback\": [\"<p>This is a summary of a paper stating the positive effects of a certain hormone on face recognition for people with disrupted face processing [1].\\n<br>\\n<br>Face recognition is essential for social interaction and most people perform it effortlessly. But a surprisingly high number of people \\u2013 one in forty \\u2013 are impaired since birth in their ability to recognize faces [2]. This condition is called 'developmental prosopagnosia'. Its cause isn\\u2"
:param row:
:type row:
:param postDict:
:type postDict:
'''
if postDict is None:
return row
submissionID = postDict.get('submission_id', None)
feedback = postDict.get('feedback', None)
if feedback is not None:
feedback = self.makeInsertSafe(str(feedback))
self.setValInRow(row, 'submission_id', submissionID)
self.setValInRow(row, 'long_answer', feedback)
return row
def subHandleProblemCheckInPath(self, row, answersDict):
'''
Get dict like this::
{\"input_i4x-Medicine-HRP258-problem-f0b292c175f54714b41a1b05d905dbd3_2_1\": [\"choice_3\"]},
\"GET\": {}}"
:param row:
:type row:
:param answersDict:
:type answersDict:
'''
if answersDict is not None:
# Receive all the Answer table keys generated for
# the answers, and a dict mapping each key
# to the problem ID to which that key's row
# in the Answer refers:
(answersFKeys, answerToProblemMap) = self.pushAnswers(answersDict)
for answerFKey in answersFKeys:
self.setValInRow(row, 'answer_fk', answerFKey, self.mainTableName)
if answerFKey is not None:
# For convenience: enter the Answer's problem ID
# in the main table's problemID field:
problemID = answerToProblemMap[answerFKey]
self.setValInRow(row, 'problem_id', problemID)
# Try to look up the human readable display name
# of the problem, and insert it into the main
# table's resource_display_name field:
self.setResourceDisplayName(row, problemID)
rowInfoTriplet = self.resultTriplet(row, self.mainTableName)
self.jsonToRelationConverter.pushToTable(rowInfoTriplet)
# The next row keeps its eventID, but needs its own
# primary key (in _id):
self.setValInRow(row, '_id', self.getUniqueID())
return []
def handleAjaxLogin(self, record, row, event, eventType):
'''
Events look like this::
"{\"POST\": {\"password\": \"********\", \"email\": [\"emil.smith@gmail.com\"], \"remember\": [\"true\"]}, \"GET\": {}}"
:param record:
:type record:
:param row:
:type row:
:param event:
:type event:
:param eventType:
:type eventType:
'''
if event is None:
self.logWarn("Track log line %s: missing event text in event %s." %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
eventDict = self.ensureDict(event)
if eventDict is None:
self.logWarn("Track log line %s: event is not a dict in event: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
try:
postDict = eventDict['POST']
except KeyError:
self.logWarn("Track log line %s: event in login_ajax is not GET styled: '%s'" %\
(self.jsonToRelationConverter.makeFileCitation(), str(event)))
return row
email = postDict.get('email', None)
# We get remember here, but don't carry it to the relational world:
remember = postDict.get('remember', None) # @UnusedVariable
if email is not None:
# Stick email into the screen_name field. But flatten
# the array of email addresses to a string (I've only
# seen single-element arrays anyway):
try:
email = '; '.join(email)
except TypeError:
pass
self.setValInRow(row, 'anon_screen_name', self.hashGeneral(email))
return row
def finish(self, includeCSVLoadCommands=False, outputDisposition=None):
'''
Called by json_to_relation parent after
the last insert for one file is done.
We do what's needed to close out the transform.
:param includeCSVLoadCommands: if True, then the main output file will just have
table locking, and turning off consistency checks. In that case we
insert CSV file load statements. Otherwise the output file already
has INSERT statements, and we just add table unlocking, etc.:
:type includeCSVLoadCommands: Boolean
:param outputDisposition: an OutputDisposition that offers a method getCSVTableOutFileName(tableName),
which provides the fully qualified file name that is the
destination for CSV rows destined for a given table.
:type outputDisposition: OutputDisposition
'''
if includeCSVLoadCommands:
self.jsonToRelationConverter.pushString(self.createCSVTableLoadCommands(outputDisposition))
# Unlock tables, and return foreign key checking to its normal behavior:
self.jsonToRelationConverter.pushString(self.dumpPostscript1)
# Copy the temporary Account entries from
# the tmp table in Edx to the final dest
# in EdxPrivate:
self.createMergeAccountTbl()
# Same for the EventIp table:
self.createMergeEventIpTbl()
# Restore various defaults:
self.jsonToRelationConverter.pushString(self.dumpPostscript2)
def createCSVTableLoadCommands(self, outputDisposition):
'''
Create a series of LOAD INFILE commands as a string. One load command
for each of the Edx track log tables.
:param outputDisposition: an OutputDisposition that offers a method getCSVTableOutFileName(tableName),
which provides the fully qualified file name that is the
destination for CSV rows destined for a given table.
:type outputDisposition: OutputDisposition
'''
csvLoadCommands = "SET sql_log_bin=0;\n"
csvLoadCommands += "SET autocommit=0;\n"
for tableName in ['LoadInfo', 'InputState', 'State', 'CorrectMap', 'Answer', 'Account', 'EventIp', 'EdxTrackEvent', 'ABExperiment', 'OpenAssessment']:
filename = outputDisposition.getCSVTableOutFileName(tableName)
# SQL statements for LOAD INFILE all .csv tables in turn. Only used
# when no INSERT statement dump is being generated:
csvLoadCommands += "LOAD DATA LOCAL INFILE '%s' IGNORE INTO TABLE %s FIELDS OPTIONALLY ENCLOSED BY \"'\" TERMINATED BY ','; \n" %\
(filename, tableName)
csvLoadCommands += "SET autocommit=1;\n"
csvLoadCommands += "SET sql_log_bin=1;\n"
return csvLoadCommands
def createMergeAccountTbl(self):
'''
Called at the very end of a load: copies all the entries
from the temporary Account table in the Edx db to the permanent
Account table in EdxPrivate. Then DROPs the tmp Account table:
'''
colNameList = ''
for colName in self.schemaAccountTbl.keys():
colNameList += colName + ','
# Snip off the last comma:
colNameList = colNameList[:-1]
copyStatement = "REPLACE INTO EdxPrivate.Account (" + colNameList + ")" +\
" SELECT " + colNameList + " FROM Edx.Account;\n" +\
"DROP TABLE Edx.Account;\n"
self.jsonToRelationConverter.pushString(copyStatement)
def createMergeEventIpTbl(self):
'''
Called at the very end of a load: copies all the entries
from the temporary EventIp table in the Edx db to the permanent
EventIp table in EdxPrivate. Then DROPs the tmp Edx.EventIp table:
'''
colNameList = ''
for colName in self.schemaEventIpTbl.keys():
colNameList += colName + ','
# Snip off the last comma:
colNameList = colNameList[:-1]
copyStatement = "REPLACE INTO EdxPrivate.EventIp (" + colNameList + ")" +\
" SELECT " + colNameList + " FROM Edx.EventIp;\n" +\
"DROP TABLE Edx.EventIp;\n"
self.jsonToRelationConverter.pushString(copyStatement)
def handleBadJSON(self, row, offendingText):
'''
When JSON parsing fails, place the offending text into
longAnswer. Happens, for instance, when student answers have embedded
quotes that confused some upstream load process.
:param row:
:type row:
:param offendingText:
:type offendingText:
'''
self.setValInRow(row, 'badly_formatted', self.makeInsertSafe(offendingText))
return row
def getEventTimeFromLogTimeString(self, eventTimeStr):
try:
# Time strings in the log may or may not have a UTF extension:
# '2013-07-18T08:43:32.573390:+00:00' vs '2013-07-18T08:43:32.573390'
# For now we ignore time zone. Observed log samples are
# all universal +/- 0:
maybeOffsetDir = eventTimeStr[-6]
if maybeOffsetDir == '+' or maybeOffsetDir == '-':
eventTimeStr = eventTimeStr[0:-6]
eventDateTime = datetime.datetime.strptime(eventTimeStr, '%Y-%m-%dT%H:%M:%S.%f')
return eventDateTime
except ValueError:
raise ValueError("Bad event time format: '%s'" % eventTimeStr)
def getEventTimeFromLogRecord(self,logRecord):
'''
Extract the time field from raw JSON log record.
:param logRecord:
:type logRecord:
:return: date and time as object
:rtype: datetime.datetime
'''
try:
eventTimeStr = logRecord['time']
return self.getEventTimeFromLogTimeString(eventTimeStr)
except KeyError:
raise ValueError("No event time.")
def get_course_id(self, record):
'''
Given a 'pythonized' JSON tracking record object, find
the course URL, and extract the course name from it.
A number of different events occur, which do not contain
course IDs: server heartbeats, account creation, dashboard
accesses. Among them are logins, which look like this::
{"username": "",
"host": "class.stanford.edu",
"event_source": "server",
"event_type": "/accounts/login",
"time": "2013-06-14T00:31:57.661338",
"ip": "98.230.189.66",
"record": "{
\"POST\": {},
\"GET\": {
\"next\": [\"/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/80160e.../\"]}}",
"agent": "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101
Firefox/21.0",
"page": null
}
But also::
{"username": "RobbieH",
"host": "class.stanford.edu",
...
"record": {"failure": "closed", "state": {"student_answers": {"i4x-Medicine-HRP258-problem-4cd47ea861f542488a20691ac424a002_7_1": "choice_1", "i4x-Medicine-HRP258-problem-4cd47ea861f542488a20691ac424a002_2_1": "choice_3", "i4x-Medicine-HRP258-problem-4cd47ea861f542488a20691ac424a002_9_1": ["choice_0", "choice_1"], "i4x-Medicine-HRP258-problem-4cd47ea861f542488a20691ac424a002_6_1": "choice_0", "i4x-Medicine-HRP258-problem-4cd47ea861f542488a20691ac424a002_8_1": ["choice_0", "choice_1", "choice_2", "choice_3", "choice_4"],
Notice the 'record' key's value being a *string* containing JSON, rather than
a nested JSON object. This requires special attention. Buried inside
that string is the 'next' tag, whose value is an array with a long (here
partially elided) hex number. This is where the course number is
extracted.
In some cases the newish 'context' field in the record
(i.e. outside the value of the 'record' label, has a course_id
field we need to use.
:param record: JSON record of an edx tracking event as internalized dict
:type record: Dict<String,Dict<<any>>
:return: three-tuple: full course name in which record occurred, course ID, and descriptive display name.
Empty strings if the course ID could not be obtained.
:rtype: (String,String,String)
'''
# Start with the newer tracking log convention of having
# a course_id field in the top-level record:
try:
full_course_name = course_id = course_display_name = record['context']['course_id']
return (full_course_name, course_id, course_display_name)
except:
pass
course_id = ''
eventSource = record.get('event_source', None)
if eventSource is None:
return ('','','')
if eventSource == 'server':
# get course_id from record type
eventType = record.get('event_type', None)
if eventType is None:
return('','','')
if eventType == u'/accounts/login':
try:
post = json.loads(str(record.get('event', None)))
except:
return('','','')
if post is not None:
getEntry = post.get('GET', None)
if getEntry is not None:
try:
fullCourseName = getEntry.get('next', [''])[0]
except:
return('','','')
else:
return('','','')
else:
return('','','')
elif eventType.startswith('/courses'):
courseID = self.extractShortCourseID(eventType)
return(courseID, courseID, self.getCourseDisplayName(eventType))
elif eventType.find('problem_') > -1:
event = record.get('event', None)
if event is None:
return('','','')
courseID = self.extractCourseIDFromProblemXEvent(event)
return(courseID, courseID, '')
else:
fullCourseName = record.get('event_type', '')
else:
fullCourseName = record.get('page', '')
# Above logic makes an error for '/dashboard' events:
# it assigns '/dashboard' to the fullCourseName. Correct
# this:
if fullCourseName == '/dashboard' or fullCourseName == '/heartbeat':
fullCourseName = ""
if len(fullCourseName) > 0:
course_display_name = self.extractShortCourseID(fullCourseName)
else:
course_display_name = ''
if len(course_id) == 0:
course_id = fullCourseName
return (fullCourseName, course_id, course_display_name)
def getQuarter(self,eventTime):
'''
Returns string <quarter><year> as derived from the event date:
fallQuarterStartDate = Year + "-09-01T00:00:00Z";
winterQuarterStartDate = Year + "-12-01T00:00:00Z";
springQuarterStartDate = Year + "-03-01T00:00:00Z";
summerQuarterStartDate = Year + "-06-01T00:00:00Z";
summerQuarterEndDate = Year + "-08-31T00:00:00Z";
:param eventTime:
:type eventTime:
'''
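# Illustrative example (added commentary): an event time of
# datetime.datetime(2013, 7, 4, 12, 0) falls in month 7, so the method
# returns 'summer2013'.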
eventYear = eventTime.year
if eventTime.month >= 1 and eventTime.month < 3 or eventTime.month == 12:
quarter = 'winter'
elif eventTime.month >= 3 and eventTime.month < 6:
quarter = 'spring'
elif eventTime.month >= 6 and eventTime.month < 9:
quarter = 'summer'
else:
quarter = 'fall'
return str(quarter) + str(eventYear)
def getCourseDisplayName(self, fullCourseName):
'''
Given a full course name or event path that embeds an OpenEdX hash,
look up the course's display name via the hash mapper. Returns None
if no hash is found or no mapping exists.
:param fullCourseName: course path or event_type string containing an OpenEdX hash
:type fullCourseName: String
'''
hashStr = self.extractOpenEdxHash(fullCourseName)
if hashStr is None:
return None
courseShortName = self.hashMapper.getCourseShortName(hashStr)
if courseShortName is not None:
return self.hashMapper[courseShortName]
else:
return None
def extractShortCourseID(self, fullCourseStr):
if fullCourseStr is None:
return ''
courseNameFrags = fullCourseStr.split('/')
course_id = ''
if 'courses' in courseNameFrags:
i = courseNameFrags.index('courses')
course_id = "/".join(map(str, courseNameFrags[i+1:i+4]))
return course_id
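# Illustrative example (added commentary):
#
#   extractShortCourseID('/courses/Medicine/HRP258/Statistics_in_Medicine/courseware/80160e/')
#   # -> 'Medicine/HRP258/Statistics_in_Medicine'
#
# i.e. the three path components following 'courses' are joined with '/'.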
def extractCourseIDFromProblemXEvent(self, event):
'''
Given the 'event' field of an event of type problem_check, problem_check_fail, problem_save...,
extract the course ID. Ex from save_problem_check::
"event": {"success": "correct", "correct_map": {"i4x-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1": {"hint": "", "hintmode": null,...
:param event:
:type event:
'''
if event is None:
return None
# isolate '-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1":' from:
# ' {"success": "correct", "correct_map": {"i4x-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1": {"hint": "", "hintmode": null'
match = EdXTrackLogJSONParser.problemXFindCourseID.search(str(event))
if match is None:
return None
# the match obj's groups is now: '-Medicine-HRP258-problem-8dd11b4339884ab78bc844ce45847141_2_1"'
# Split into ['', 'Medicine', 'HRP258', 'problem', '8dd11b4339884ab78bc844ce45847141_2_1"']
parts = match.groups()[0].split('-')
try:
return "-".join([parts[1], parts[2]])
except IndexError:
return None
def ensureDict(self, event):
'''
If event is either a dict, or a string with a dict
definition inside, returns a dict. Else returns None
:param event:
:type event:
'''
if isinstance(event, dict):
return event
else:
try:
# Maybe it's a string: make a dict from the string:
res = eval(event)
if isinstance(res, dict):
return res
else:
return None
except Exception:
return None
def ensureArray(self, event):
'''
If event is either a Python array, or a string with an array
definition inside, returns the array. Else returns None
:param event:
:type event:
'''
if isinstance(event, list):
return event
else:
try:
# Maybe it's a string: make an array from the string:
res = eval(event)
if isinstance(res, list):
return res
else:
return None
except Exception:
return None
def makeInsertSafe(self, unsafeStr):
'''
Makes the given string safe for use as a value in a MySQL INSERT
statement. Looks for embedded CR or LFs, and turns them into
semicolons. Escapes commas and single quotes. Backslash is
replaced by double backslash. This is needed for unicode, like
\0245 (invented example)
:param unsafeStr: string that possibly contains unsafe chars
:type unsafeStr: String
:return: same string, with unsafe chars properly replaced or escaped
:rtype: String
'''
#return unsafeStr.replace("'", "\\'").replace('\n', "; ").replace('\r', "; ").replace(',', "\\,").replace('\\', '\\\\')
if unsafeStr is None or not isinstance(unsafeStr, basestring) or len(unsafeStr) == 0:
return ''
# Check for chars > 128 (illegal for standard ASCII):
for oneChar in unsafeStr:
if ord(oneChar) > 128:
# unidecode() replaces unicode with approximations.
# I tried all sorts of escapes, and nothing worked
# for all cases, except this:
unsafeStr = unidecode(unicode(unsafeStr))
break
return unsafeStr.replace('\n', "; ").replace('\r', "; ").replace('\\', '').replace("'", r"\'")
def makeJSONSafe(self, jsonStr):
'''
Given a JSON string, make it safe for loading via
json.loads(). Backslashes before chars other than
any of \bfnrtu/ are escaped with a second backslash
:param jsonStr:
:type jsonStr:
'''
res = EdXTrackLogJSONParser.JSON_BAD_BACKSLASH_PATTERN.sub(self.fixOneJSONBackslashProblem, jsonStr)
return res
def fixOneJSONBackslashProblem(self, matchObj):
'''
Called from the pattern.sub() method in makeJSONSafe for
each match of a bad backslash in jsonStr there. Returns
the replacement string to use by the caller for the substitution.
Ex. a match received from the original string "\d'Orsay" returns
"\\d".
:param matchObj: a Match object resulting from a regex search/replace
call.
:type matchObj: Match
'''
return "\\\\" + matchObj.group(1)
def rescueBadJSON(self, badJSONStr, row=[]):
'''
When JSON strings are not legal, we at least try to extract
the username, host, session, event_type, event_source, and event fields
verbatim, i.e. without real parsing. We place those in the proper
fields, and leave it at that.
:param badJSONStr:
:type badJSONStr:
'''
screen_name = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['username'], badJSONStr)
#host = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['host'], badJSONStr)
session = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['session'], badJSONStr)
event_source = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['event_source'], badJSONStr)
event_type = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['event_type'], badJSONStr)
time = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['time'], badJSONStr)
ip = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['ip'], badJSONStr)
event = self.tryJSONExtraction(EdXTrackLogJSONParser.searchPatternDict['event'], badJSONStr)
if isinstance(screen_name, basestring):
self.setValInRow(row, 'anon_screen_name', self.hashGeneral(screen_name))
else:
self.setValInRow(row, 'anon_screen_name', '')
#self.setValInRow(row, 'host', host)
self.setValInRow(row, 'session', session)
self.setValInRow(row, 'event_source', event_source)
self.setValInRow(row, 'event_type', event_type)
self.setValInRow(row, 'time', time)
try:
ip_country = self.getThreeLetterCountryCode(ip)
except:
ip_country = ''
self.setValInRow(row, 'ip_country', ip_country)
self.setValInRow(row, 'badly_formatted', self.makeInsertSafe(event))
def tryJSONExtraction(self, pattern, theStr):
m = pattern.search(theStr)
try:
return None if m is None else m.group(1)
except:
return None
def getUniqueID(self):
'''
Generate a universally unique key with
all characters being legal in MySQL identifiers.
'''
return str(uuid.uuid4()).replace('-','_')
def getZipAndCountryFromMailAddr(self, mailAddr, accountDict):
zipCodeMatch = EdXTrackLogJSONParser.zipCodePattern.findall(mailAddr)
if len(zipCodeMatch) > 0:
accountDict['zipcode'] = zipCodeMatch[-1]
else:
accountDict['zipcode'] = ''
# See whether the address includes a country:
# Last ditch: if we think we found a zip code,
# start out thinking US for the country:
if len(accountDict['zipcode']) > 0:
accountDict['country'] = 'USA'
else:
accountDict['country'] = ''
# Our zip code might be a different number,
# so do look for an explicit country:
splitMailAddr = re.split(r'\W+', mailAddr)
# Surely not the fastest, but I'm tired: pass
# a sliding window of four,three,bi, and unigrams
# over the mailing address to find a country
# specification:
for mailWordIndx in range(len(splitMailAddr)):
try:
fourgram = string.join([splitMailAddr[mailWordIndx],
splitMailAddr[mailWordIndx + 1],
splitMailAddr[mailWordIndx + 2],
splitMailAddr[mailWordIndx + 3]])
country = self.countryChecker.isCountry(fourgram)
if len(country) > 0:
accountDict['country'] = country
break
except IndexError:
pass
try:
trigram = string.join([splitMailAddr[mailWordIndx], splitMailAddr[mailWordIndx + 1], splitMailAddr[mailWordIndx + 2]])
country = self.countryChecker.isCountry(trigram)
if len(country) > 0:
accountDict['country'] = country
break
except IndexError:
pass
try:
bigram = string.join([splitMailAddr[mailWordIndx], splitMailAddr[mailWordIndx + 1]])
country = self.countryChecker.isCountry(bigram)
if len(country) > 0:
accountDict['country'] = country
break
except IndexError:
pass
unigram = splitMailAddr[mailWordIndx]
country = self.countryChecker.isCountry(unigram)
if len(country) > 0:
accountDict['country'] = country
break
# Make sure that zip code is empty unless address is USA:
if accountDict['country'] != 'USA':
accountDict['zipcode'] = ''
return accountDict
def anonymizeUser(self,screenName,email):
'''
Generate a user hash, using email if available,
else the screenName. (Sometimes either email or screenName are empty)
:param screenName: user screen name in system
:type screenName: string
:param email: user email address
:type email: string
:return: 40 byte hash
:rtype: string
'''
if len(email) > 0:
return self.hashGeneral(email)
else:
return self.hashGeneral(screenName)
def hashGeneral(self, username):
'''
Returns a ripemd160 40 char hash of the given name. Uses the
corresponding class method below.
:param username: name to be hashed
:type username: String
:return: hashed equivalent. Calling this function multiple times returns the same string
:rtype: String
'''
return EdXTrackLogJSONParser.makeHash(username)
@classmethod
def makeHash(cls, username):
'''
Returns a ripemd160 40 char hash of the given name.
:param username: name to be hashed
:type username: String
:return: hashed equivalent. Calling this function multiple times returns the same string
:rtype: String
'''
#return hashlib.sha224(username).hexdigest()
oneHash = hashlib.new('ripemd160')
oneHash.update(username)
return oneHash.hexdigest()
def extractOpenEdxHash(self, idStr):
'''
Given a string, such as::
i4x-Medicine-HRP258-videoalpha-7cd4bf0813904612bcd583a73ade1d54
or::
input_i4x-Medicine-HRP258-problem-98ca37dbf24849debcc29eb36811cb68_3_1_choice_3'
extract and return the 32-character hex hash portion. If none is found,
return None. Method takes any string and finds a 32-digit hex number.
It is up to the caller to ensure that the return is meaningful. As
a minimal check, the method does ensure that there is at most one
qualifying string present; we know that this is the case with problem_id
and other strings.
:param idStr: problem, module, video ID and others that might contain a 32 bit OpenEdx platform hash
:type idStr: string
'''
if idStr is None:
return None
try:
match = EdXTrackLogJSONParser.findHashPattern.search(idStr)
except TypeError as e:
return None # Could not find hash pattern.
if match is not None:
return match.group(1)
else:
return None
def setResourceDisplayName(self, row, openEdxHash):
'''
Given an OpenEdx hash of problem ID, video ID, or course ID,
set the resource_display_name in the given row. The value
passed in may have the actual hash embedded in a larger
string, as in::
input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_2_1
We fish it out of there. If the value is found, then it is
inserted into the given row.
:param row: current row's values
:type row: [<any>]
:param openEdxHash: 32-bit hash string encoding a problem, video, or class, or
such a 32-bit hash embedded in a larger string.
:type openEdxHash: String
'''
if openEdxHash is not None and len(openEdxHash) > 0:
displayName = self.findResourceDisplayName(openEdxHash)
if len(displayName) > 0:
self.setValInRow(row, 'resource_display_name', self.makeInsertSafe(displayName))
def findResourceDisplayName(self, openEdxHash):
'''
Given an OpenEdx hash of a problem ID, video ID, or course ID,
look up and return the human-readable resource display name. The value
passed in may have the actual hash embedded in a larger
string, as in::
input_i4x-Medicine-HRP258-problem-7451f8fe15a642e1820767db411a4a3e_2_1
We fish it out of there. If no display name is found, an empty
string is returned.
:param openEdxHash: 32-bit hash string encoding a problem, video, or class, or
such a 32-bit hash embedded in a larger string.
:type openEdxHash: String
:return a human readable name, if it exists, else empty string.
:rtype string
'''
displayName = ''
if openEdxHash is not None and len(openEdxHash) > 0:
# Fish out the actual 32-bit hash:
hashNum = self.extractOpenEdxHash(openEdxHash)
if hashNum is None:
displayName = ''
return displayName
# Get display name and add to main table as resource_display_name:
displayName = self.hashMapper.getDisplayName(hashNum)
if displayName is None:
displayName = ''
return displayName
def findModuleNameInEventContext(self, record):
'''
Track log JSON records of newer vintage sometimes have a context field
in which the human-readable form of a module is stored.
Retrieve that value and return it. If not found, return
empty string
:param record: full record dict
:type record: {}
:return human readable module name
:rtype string
'''
moduleName = ''
try:
moduleName = record['context']['module']['display_name']
except:
pass
return moduleName
def extractCanonicalCourseName(self, trackLogStr):
'''
Given a string believed to be the best course name
snippet from a log entry, use the modulestoreImporter's
facilities to get a canonical name. Inputs look like::
Medicine/HRP258/Statistics_in_Medicine
/courses/Education/EDUC115N/How_to_Learn_Math/modx/i4x://Education/EDUC115N/sequential/1b3ac347ca064b3eaaddbc27d4200964/goto_position
:param trackLogStr: string that hopefully contains a course short name
:type trackLogStr: String
:return: a string of the form org/courseShortName/courseTitle, or None if no course name
could be found in the given string.
:rtype: {String | None}
'''
# First, remove substrings that are obviously
# hashes that could match short course names:
trackLogStr = self.hexGE32Digits.sub('', trackLogStr)
# We go through all course short names, starting
# with the longest, in decreasing length. We
# select the first course short name that is
# embedded in the given trackLogStr. Proceeding
# by decreasing length is needed to avoid prematurely
# choosing a course short name like 'db', which easily
# matches a hash string.
for shortCourseName in self.courseNamesSorted:
if string.find(trackLogStr, shortCourseName) > -1:
return self.hashMapper[shortCourseName]
return None
def getThreeLetterCountryCode(self, ipAddr):
'''
Given an ip address string, return the corresponding
3-letter country code, or None if no country found.
This method could easily be modified to return the 2-letter code,
or the full country name.
:param ipAddr: IP address whose assigned country is to be found
:type ipAddr: String
:return: a three-letter country code
:rtype: String
'''
# Get the triplet (2-letter-country-code, 3-letter-country-code, country):
val = self.ipCountryDict.get(ipAddr, None)
if val is not None:
return val[1] # get 3-letter country code
else:
return None
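# --- Editor's illustrative sketch (not part of the original parser) ---
# The two helpers below mirror, under stated assumptions, what makeHash() and
# extractOpenEdxHash() do above: a ripemd160 hex digest of a screen name, and
# pulling a 32-hex-digit OpenEdx hash out of a longer resource ID. The regex is
# an assumption; the parser's real findHashPattern is defined elsewhere in the
# file. Note that ripemd160 availability depends on the local OpenSSL build.
import hashlib
import re

_HASH32 = re.compile(r'([a-fA-F0-9]{32})')

def demo_make_hash(name):
    # Stable 40-character ripemd160 hex digest, same idea as makeHash() above.
    one_hash = hashlib.new('ripemd160')
    one_hash.update(name.encode('utf-8'))
    return one_hash.hexdigest()

def demo_extract_hash(id_str):
    # Return the first 32-hex-digit substring of an i4x-style resource ID, or None.
    match = _HASH32.search(id_str or '')
    return match.group(1) if match else None

if __name__ == '__main__':
    print(demo_make_hash('jane_doe'))
    print(demo_extract_hash('i4x-Medicine-HRP258-videoalpha-7cd4bf0813904612bcd583a73ade1d54'))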
| 46.625071
| 2,478
| 0.591018
|
d800f6b6d26f0bbd845520ae8bf0730b4d4a285b
| 5,201
|
py
|
Python
|
m5-101/content/code/equation-solver-solution.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4
|
2021-03-25T13:15:38.000Z
|
2021-11-10T12:29:19.000Z
|
docs/content/code/equation-solver-solution.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | null | null | null |
docs/content/code/equation-solver-solution.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4
|
2021-03-25T13:18:10.000Z
|
2021-04-08T13:44:48.000Z
|
'''
Author: Chonghan Chen (paulcccccch@gmail.com)
-----
Last Modified: Thursday, 18th March 2021 3:06:59 pm
Modified By: Chonghan Chen (paulcccccch@gmail.com)
-----
Copyright (c) 2021 IceWould Information Technology Co., Ltd.
'''
import math
#################
### Section 1 ###
#################
def solve_linear(a, b):
"""
Solve the equation ax + b = 0
"""
if a == 0 and b == 0:
return None
elif a == 0:
return []
else:
return [-b / a]
#################
### Section 2 ###
#################
def solve_with_formula(a, b, c):
"""
Solve the equation ax^2 + bx + c = 0
"""
if a == 0:
return solve_linear(b, c)
delta = b ** 2 - 4 * a * c
if delta < 0:
return []
elif delta == 0:
return [-b / (2*a)]
else:
delta_sqrt = delta ** 0.5
return [(-b + delta_sqrt) / (2*a), (-b - delta_sqrt) / (2*a)]
#################
### Section 3 ###
#################
def d(f):
return lambda x: (f(x + 1e-6) - f(x)) / 1e-6
def solve_equation(f):
"""
Solve the equation f(x) = 0
"""
xi = 1
while abs(f(xi)) > 1e-6:
xi = xi - f(xi)/d(f)(xi)
return xi
def section1_test(case):
args = case[0] # (a, b)
ans = case[1] # correct ans
sol = solve_linear(args[0], args[1])
res = False
if sol == ans or (ans != None and len(ans) == 1 and sol != None and len(sol) == 1 and abs(sol[0] - ans[0]) < 1e-6):
res = True
print('Case ' + str(args[0]) + 'x+' + str(args[1]) + '=0: expected ' + str(ans) + ', got ' + str(sol) + '.')
if res:
print('Check passed.')
else:
print('Check failed.')
assert(res == True)
def section2_test(case):
args = case[0] # [a, b, c]
number_of_ans = case[1] # -1 for question with countless solutions
sol = solve_with_formula(args[0], args[1], args[2])
res = False
if number_of_ans == 1 and sol != None and len(sol) == 1:
if args[0] != 0:
res = abs(sol[0]+sol[0] + args[1]/args[0]) < 1e-6 and abs(sol[0]*sol[0] - args[2] / args[0]) < 1e-6
else:
res = abs(sol[0] + args[2]/args[1]) < 1e-6
elif number_of_ans == 2 and sol != None and len(sol) == 2:
res = abs(sol[0]+sol[1] + args[1]/args[0]) < 1e-6 and abs(sol[0]*sol[1] - args[2] / args[0]) < 1e-6
elif number_of_ans == 0 and sol != None and sol == []:
res = True
elif number_of_ans == -1 and sol == None:
res = True
print('Case ' + str(args[0]) + 'x^2+' + str(args[1]) + 'x+' + str(args[2]) + '=0: your result is ' + str(sol) + '.')
if res:
print('Correct.')
else:
print('Incorrect.')
assert(res == True)
def section3_test(case):
question_str = case[0]
question_f = case[1]
ans = case[2]
sol = solve_equation(question_f)
res = False
for a in ans:
if abs(sol - a) < 1e-6:
res = True
break
print('Case ' + question_str + '=0: your result is ' + str(sol) + '.')
if res:
print('Correct.')
else:
print('Incorrect.')
assert(res == True)
if __name__ == "__main__":
print("正在检查 Section 1:")
section1 = [ ((3, 6), [-2]),
((3, 4), [-4/3]),
((0, 5), []),
((5, 0), [0]),
((0, 0), None) ]
for case in section1:
section1_test(case)
# map(section1_test, section1)
print("Section 1 正确")
print("正在检查 Section 2:")
section2 = [ ([1, 2, 1], 1),
([3, 2, 1], 0),
([4, 5, -6], 2),
([3, 5, 3], 0),
([-2, 4, 1], 2),
([0, 0, 5], 0),
([0, 5, 0], 1),
([0, 0, 0], -1) ]
for case in section2:
section2_test(case)
# assert(solve_with_formula(1, 2, 1) == [-1])
# assert(solve_with_formula(3, 2, 1) == [])
# sol = solve_with_formula(4, 5, -6)
# # print(sol)
# # assert(sol + 5 / 4 < 1e-6)
# assert(abs(sol[0] + sol[1] + 5 / 4) < 1e-6)
# assert(abs(sol[0] * sol[1] + 3 / 2) < 1e-6)
# sol = solve_with_formula(3, 5, 3)
# assert(sol == [])
# sol = solve_with_formula(-2, 4, 1)
# assert(abs(sol[0] + sol[1] + 4 / -2) < 1e-6)
# assert(abs(sol[0] * sol[1] + 1 / 2) < 1e-6)
# assert(solve_with_formula(0, 0, 5) == [])
# assert(solve_with_formula(0, 5, 0) == [0])
# assert(solve_with_formula(0, 0, 0) == None)
print("Section 2 正确")
print("正在检查 Section 3:")
section3 = [ ('lgx+x', lambda x: math.log(x) + x, [0.567143290]),
('x^3-7x^2+11x+3', lambda x: x ** 3 - 7 * x ** 2 + 11 * x + 3, [3, (2 - math.sqrt(5)), (2 + math.sqrt(5))]),
('-3x^3+x^2+9', lambda x: -3 * (x ** 3) + (x ** 2) + 9, [1.5623574989]) ]
for case in section3:
section3_test(case)
# f = lambda x: math.log(x) + x
# assert(abs(solve_equation(f) - 0.567143290) < 1e-6)
# f = lambda x: x ** 3 - 7 * x ** 2 + 11 * x + 3
# assert(
# abs(solve_equation(f) - 3) < 1e-6 or
# abs(solve_equation(f) - (2 - math.sqrt(5))) < 1e-6 or
# abs(solve_equation(f) - (2 + math.sqrt(5))) < 1e-6
# )
# f = lambda x: -3 * (x ** 3) + (x ** 2) + 9
# assert(abs(solve_equation(f) - 1.5623574989) < 1e-6)
print("Section 3 正确")
| 29.055866
| 125
| 0.467795
|
caaa2531a9b87d7ab7addbf6464ff2510794ff75
| 5,473
|
py
|
Python
|
setup.py
|
Richa-123-singh/jina
|
731e6b45756be9a9af2fd118855e010e63c2b541
|
[
"Apache-2.0"
] | 1
|
2020-11-14T09:54:09.000Z
|
2020-11-14T09:54:09.000Z
|
setup.py
|
Richa-123-singh/jina
|
731e6b45756be9a9af2fd118855e010e63c2b541
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Richa-123-singh/jina
|
731e6b45756be9a9af2fd118855e010e63c2b541
|
[
"Apache-2.0"
] | null | null | null |
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
PY37 = 'py37'
PY38 = 'py38'
if sys.version_info >= (3, 8, 0):
py_tag = PY38
elif sys.version_info >= (3, 7, 0):
py_tag = PY37
else:
raise OSError('Jina requires Python 3.7 and above, but yours is %s' % sys.version)
try:
pkg_name = 'jina'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][0]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
def get_extra_requires(path, add_all=True):
import re
from collections import defaultdict
try:
with open(path) as fp:
extra_deps = defaultdict(set)
for k in fp:
if k.strip() and not k.startswith('#'):
tags = set()
if ':' in k:
k, v = k.split(':')
tags.update(vv.strip() for vv in v.split(','))
tags.add(re.split('[<=>]', k)[0])
for t in tags:
extra_deps[t].add(k)
if PY37 not in tags and PY38 not in tags:
# no specific python version required
extra_deps[PY37].add(k)
extra_deps[PY38].add(k)
# add tag `all` at the end
if add_all:
extra_deps['all'] = set(vv for v in extra_deps.values() for vv in v)
extra_deps['match-py-ver'] = extra_deps[py_tag]
return extra_deps
except FileNotFoundError:
return {}
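# Editor's note (an assumption inferred from the parser above, not taken from
# Jina's actual extra-requirements.txt): each line is expected to look like
# "<requirement>[: <tag1>, <tag2>, ...]", for example
#
# numpy>=1.19.0
# uvloop>=0.14.0: perf, py37, py38
#
# get_extra_requires() would then return something roughly like
# {'numpy': {'numpy>=1.19.0'}, 'uvloop': {'uvloop>=0.14.0'},
# 'perf': {'uvloop>=0.14.0'}, 'py37': {...}, 'py38': {...},
# 'all': {...}, 'match-py-ver': {...}},
# i.e. every requirement is also keyed by its own distribution name, lines
# without an explicit py37/py38 tag are added to both, and 'all' unions everything.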
def register_ac():
from pathlib import Path
import os
import re
home = str(Path.home())
resource_path = 'jina/resources/completions/jina.%s'
regex = r'#\sJINA_CLI_BEGIN(.*)#\sJINA_CLI_END'
_check = {'zsh': '.zshrc',
'bash': '.bashrc',
'fish': '.fish'}
def add_ac(k, v):
v_fp = os.path.join(home, v)
if os.path.exists(v_fp):
with open(v_fp) as fp, open(resource_path % k) as fr:
sh_content = fp.read()
if re.findall(regex, sh_content, flags=re.S):
_sh_content = re.sub(regex, fr.read(), sh_content, flags=re.S)
else:
_sh_content = sh_content + '\n\n' + fr.read()
if _sh_content:
with open(v_fp, 'w') as fp:
fp.write(_sh_content)
try:
for k, v in _check.items():
add_ac(k, v)
except Exception:
pass
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
develop.run(self)
register_ac()
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
install.run(self)
register_ac()
all_deps = get_extra_requires('extra-requirements.txt')
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='Jina is the cloud-native neural search solution powered by the state-of-the-art AI and deep learning',
author='Jina Dev Team',
author_email='dev-team@jina.ai',
license='Apache 2.0',
url='https://opensource.jina.ai',
download_url='https://github.com/jina-ai/jina/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=[
'setuptools>=18.0',
],
install_requires=list(all_deps['core'].union(all_deps['perf'])),
extras_require=all_deps,
entry_points={
'console_scripts': ['jina=cli:main'],
},
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='jina cloud-native neural-search query search index elastic neural-network encoding '
'embedding serving docker container image video audio deep-learning',
)
| 32.772455
| 119
| 0.592728
|
0d6871252e6ccd366749d72260a04c93a040c3d5
| 1,540
|
py
|
Python
|
photo/models.py
|
Tito-74/Gallery
|
67c44783f0d999d61cf450963521f6eb0f3cb995
|
[
"MIT"
] | null | null | null |
photo/models.py
|
Tito-74/Gallery
|
67c44783f0d999d61cf450963521f6eb0f3cb995
|
[
"MIT"
] | null | null | null |
photo/models.py
|
Tito-74/Gallery
|
67c44783f0d999d61cf450963521f6eb0f3cb995
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Location(models.Model):
location = models.CharField(max_length =30)
def __str__(self):
return self.location
def delete_location(self):
self.delete()
@classmethod
def get_locations(cls):
location = Location.objects.all()
return location
class Category(models.Model):
category = models.CharField(max_length =30)
def __str__(self):
return self.category
def save_category(self):
self.save()
def delete_category(self):
self.delete()
class Post(models.Model):
name = models.CharField(max_length =60)
desc = models.TextField()
pub_date = models.DateTimeField(auto_now_add=True)
category = models.ForeignKey(Category,on_delete = models.CASCADE)
location = models.ForeignKey(Location,on_delete = models.CASCADE)
image = models.ImageField(upload_to = 'posts/')
def __str__(self):
return self.name
# f'{self.name} {self.desc}'
def save_post(self):
self.save()
def delete_post(self):
self.delete()
@classmethod
def update_post(cls, id,post):
cls.objects.filter(id=id).update(post=post)
@classmethod
def search_category(cls,category):
image =cls.objects.filter(category__category__icontains=category)
return image
@classmethod
def get_post_by_id(cls, post_id):
image = cls.objects.get(id=post_id)
return image
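# --- Editor's illustrative usage sketch (assumes a configured Django project
# with this app installed and migrations applied; written as comments because it
# cannot run outside such a project) ---
# location = Location.objects.create(location='Nairobi')
# category = Category.objects.create(category='Travel')
# post = Post.objects.create(name='Sunset', desc='Evening shot',
# category=category, location=location, image='posts/sunset.jpg')
# Post.search_category('travel')  # case-insensitive lookup through the Category relation
# Post.get_post_by_id(post.id)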
| 20.810811
| 73
| 0.632468
|
0e699d2816cea917a1e9b527bb11f40b7e0ab2ff
| 1,117
|
py
|
Python
|
mysite/main/urls.py
|
DanHunt27/Music-Website
|
2764695997963047009bf6c74f14c370b6225c85
|
[
"MIT"
] | null | null | null |
mysite/main/urls.py
|
DanHunt27/Music-Website
|
2764695997963047009bf6c74f14c370b6225c85
|
[
"MIT"
] | 12
|
2020-06-06T01:27:27.000Z
|
2022-02-11T03:45:47.000Z
|
mysite/main/urls.py
|
DanHunt27/Music-Website
|
2764695997963047009bf6c74f14c370b6225c85
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import (
PostListView,
PostCreateView,
PostUpdateView,
PostDeleteView,
PostExploreView,
UserPostListView,
CommentCreateView,
CommentDeleteView
)
from . import views
import sys
from django.contrib.auth.models import User
urlpatterns = [
path('', PostListView.as_view(), name='index'),
path('explore/', PostExploreView.as_view(), name='explore'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('post/<int:pk>/comment-create/', CommentCreateView.as_view(), name='comment-create'),
path('comment/<int:pk>/delete/', CommentDeleteView.as_view(), name='comment-delete'),
path('post/<int:pk>/like/', views.like, name='like'),
path('follow/<str:username>', views.follow, name='follow'),
path('unfollow/<str:username>', views.unfollow, name='unfollow'),
]
| 38.517241
| 94
| 0.688451
|
151278cbdcbec98b187195f5d133b090ebbf20fe
| 199
|
py
|
Python
|
tests/browsable_api/auth_urls.py
|
scratchmex/django-rest-framework
|
34a93e6a50365dc8901fa493b1bf6d4eb3d09bf7
|
[
"BSD-3-Clause"
] | 17,395
|
2017-03-31T21:13:13.000Z
|
2022-03-31T21:33:13.000Z
|
tests/browsable_api/auth_urls.py
|
scratchmex/django-rest-framework
|
34a93e6a50365dc8901fa493b1bf6d4eb3d09bf7
|
[
"BSD-3-Clause"
] | 2,983
|
2017-03-31T14:43:26.000Z
|
2022-03-31T20:34:23.000Z
|
tests/browsable_api/auth_urls.py
|
scratchmex/django-rest-framework
|
34a93e6a50365dc8901fa493b1bf6d4eb3d09bf7
|
[
"BSD-3-Clause"
] | 5,159
|
2017-03-31T15:06:32.000Z
|
2022-03-31T03:25:17.000Z
|
from django.urls import include, path
from .views import MockView
urlpatterns = [
path('', MockView.as_view()),
path('auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| 22.111111
| 78
| 0.703518
|
3ede1cfe1c9152f85f3d592a025fcfce3843790e
| 1,382
|
py
|
Python
|
python/masa/lib/python3.4/site-packages/sdl2/test/syswm_test.py
|
tekktonic/programming
|
139959ab9934912d4c531e5ee8b1f39094a6823c
|
[
"0BSD"
] | 3
|
2016-07-16T12:19:43.000Z
|
2021-04-22T19:25:53.000Z
|
sdl2/test/syswm_test.py
|
GreatFruitOmsk/py-sdl2
|
8d1465b6238ab33c14ce0dc473529e7b38650062
|
[
"DOC"
] | null | null | null |
sdl2/test/syswm_test.py
|
GreatFruitOmsk/py-sdl2
|
8d1465b6238ab33c14ce0dc473529e7b38650062
|
[
"DOC"
] | 3
|
2018-09-13T09:08:02.000Z
|
2021-04-07T18:44:32.000Z
|
import sys
import unittest
import ctypes
from ..stdinc import SDL_TRUE
from .. import video, syswm, version
class SDLSysWMTest(unittest.TestCase):
__tags__ = ["sdl"]
def setUp(self):
video.SDL_VideoInit(None)
def tearDown(self):
video.SDL_VideoQuit()
def test_SDL_GetWindowWMInfo(self):
window = video.SDL_CreateWindow(b"Test", 10, 10, 10, 10,
video.SDL_WINDOW_HIDDEN)
wminfo = syswm.SDL_SysWMinfo()
version.SDL_VERSION(wminfo.version)
ret = syswm.SDL_GetWindowWMInfo(window, ctypes.byref(wminfo))
self.assertEqual(ret, SDL_TRUE)
if sys.platform in ("win32", "cygwin", "cli"):
self.assertEqual(wminfo.subsystem, syswm.SDL_SYSWM_WINDOWS)
elif sys.platform.startswith("linux"):
self.assertIn(wminfo.subsystem,
(syswm.SDL_SYSWM_X11, syswm.SDL_SYSWM_DIRECTFB))
elif sys.platform.startswith("freebsd"):
self.assertIn(wminfo.subsystem,
(syswm.SDL_SYSWM_X11, syswm.SDL_SYSWM_DIRECTFB))
elif sys.platform.startswith("darwin"):
self.assertEqual(wminfo.subsystem, syswm.SDL_SYSWM_COCOA)
video.SDL_DestroyWindow(window)
# TODO: not sure, what to test here specifically
if __name__ == '__main__':
sys.exit(unittest.main())
| 34.55
| 74
| 0.638929
|
093b10b0cfba501db3237cec8bfb72f9ad02d5d6
| 1,880
|
py
|
Python
|
projects/cats/tests/EC1.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | 1
|
2022-01-22T11:45:01.000Z
|
2022-01-22T11:45:01.000Z
|
projects/cats/tests/EC1.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | null | null | null |
projects/cats/tests/EC1.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | null | null | null |
test = {
'name': 'Problem EC1',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> key_distance_diff("wird", "word", 4)
0.6834861261734088
>>> key_distance_diff("aird", "word", 4)
1.650081475501692
>>> key_distance_diff("bord", "word", 4)
2
>>> key_distance_diff("word", "word", 0)
0
>>> key_distance_diff("word", "bord", 0)
inf
>>> key_distance_diff("w", "word", 4)
3
>>> key_distance_diff("speling","spelling", 8)
1
>>> key_distance_diff("speliing","spelling", 10)
0.9665953493282832
>>> key_distance_diff("spelosng","spelling", 10)
2.683486126173409
>>> key_distance_diff("spelosn","spelling", 10)
3.683486126173409
>>> key_distance_diff("word", "swoed", 3)
1.6834861261734089
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect("woll", common_words, key_distance_diff, 4)
'will'
>>> autocorrect("woll", common_words, meowstake_matches, 4)
'well'
>>> autocorrect("nird", all_words, key_distance_diff, 2)
'bird'
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> try:
... from cats import key_distance_diff, meowstake_matches, autocorrect, lines_from_file
... all_words = lines_from_file("data/words.txt")
... common_words = lines_from_file("data/common_words.txt")
... except ImportError:
... raise ImportError("You probably didn't define key_distance_diff in typing_test.py yet!")
""",
'teardown': '',
'type': 'doctest'
}
]
}
| 30.322581
| 101
| 0.500532
|
1d93efcf125b381931c1a148e80aedaff60d135f
| 1,954
|
py
|
Python
|
color_transfer.py
|
alissonpina/color_transfer
|
2d4488d6bc9fca934644382ac649ffff025d2db9
|
[
"MIT"
] | null | null | null |
color_transfer.py
|
alissonpina/color_transfer
|
2d4488d6bc9fca934644382ac649ffff025d2db9
|
[
"MIT"
] | null | null | null |
color_transfer.py
|
alissonpina/color_transfer
|
2d4488d6bc9fca934644382ac649ffff025d2db9
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
from tkinter import filedialog
def get_sources():
sources = [i for i in os.listdir('_source') if i[-4:] in ['.png', '.bmp', '.jpg']]
if len(sources) == 0:
print("There must be at least one image in the '_source' folder.")
else:
return sources
def get_target():
target = filedialog.askopenfilename(initialdir="_target", title="Select A Target", filetypes=[('image files', ('.png', '.bmp', '.jpg'))])
if len(target) == 0:
print("Select an image that will be used as a target.")
else:
return cv2.cvtColor(cv2.imread(target), cv2.COLOR_BGR2LAB)
def get_mean_and_std(x):
x_mean, x_std = cv2.meanStdDev(x)
x_mean = np.hstack(np.around(x_mean,2))
x_std = np.hstack(np.around(x_std,2))
return x_mean, x_std
def color_transfer():
sources = get_sources()
if sources:
t = get_target()
if t is not None and len(t) != 0:
t_mean, t_std = get_mean_and_std(t)
for n in range(len(sources)):
print("Converting picture " + sources[n] + "...")
s = cv2.imread('_source/' + sources[n])
s = cv2.cvtColor(s, cv2.COLOR_BGR2LAB)
s_mean, s_std = get_mean_and_std(s)
height, width, channel = s.shape
for i in range(0,height):
for j in range(0,width):
for k in range(0,channel):
x = s[i,j,k]
x = ((x - s_mean[k]) * (t_std[k] / s_std[k])) + t_mean[k]
x = round(x)
x = 0 if x < 0 else x
x = 255 if x > 255 else x
s[i,j,k] = x
s = cv2.cvtColor(s, cv2.COLOR_LAB2BGR)
cv2.imwrite('_output/' + sources[n], s)
color_transfer()
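# --- Editor's illustrative sketch: a vectorized form of the per-pixel loop above ---
# The three nested loops apply x' = (x - s_mean) * (t_std / s_std) + t_mean to every
# LAB channel. The standalone helper below does the same transfer with NumPy
# broadcasting; it assumes `source` and `target` are BGR images already loaded
# with cv2.imread(), and is not wired into the folder-based workflow above.
def transfer_colors(source, target):
    src = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float64)
    tgt = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float64)
    s_mean, s_std = src.mean(axis=(0, 1)), src.std(axis=(0, 1))
    t_mean, t_std = tgt.mean(axis=(0, 1)), tgt.std(axis=(0, 1))
    out = (src - s_mean) * (t_std / s_std) + t_mean
    out = np.clip(np.round(out), 0, 255).astype(np.uint8)
    return cv2.cvtColor(out, cv2.COLOR_LAB2BGR)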
| 29.164179
| 141
| 0.495394
|
0a9e0196fbb46645b03e537ad7f822595ae970b6
| 477
|
py
|
Python
|
p191_number_of_1_bits.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
p191_number_of_1_bits.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
p191_number_of_1_bits.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- utf-8 -*-
class Solution:
# @param n, an integer
# @return an integer
def hammingWeight(self, n):
sum = 0
while n:
n &= n - 1 # clears the lowest set bit of n
sum += 1
return sum
def hammingWeight2(self, n):
sum = 0
while n:
sum += n & 1
n >>= 1
return sum
if __name__ == '__main__':
print(Solution().hammingWeight(11))
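# --- Editor's worked example of the n &= n - 1 trick used in hammingWeight ---
# Each iteration clears the lowest set bit, so the loop runs once per 1-bit:
# 11 = 0b1011 -> 0b1010 -> 0b1000 -> 0b0, i.e. three iterations.
if __name__ == '__main__':
    n, steps = 11, []
    while n:
        n &= n - 1
        steps.append(bin(n))
    print(steps)  # ['0b1010', '0b1000', '0b0']
    assert len(steps) == Solution().hammingWeight(11) == 3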
| 19.875
| 68
| 0.48218
|
62d860404bfd8f7a91ed0058712bd6c316e5b999
| 1,782
|
py
|
Python
|
solnml/components/hpo_optimizer/mfse_optimizer.py
|
williamy1996/Autoexpression
|
b470d9ff67074c8b076abbc1dce359db9a36f921
|
[
"MIT"
] | null | null | null |
solnml/components/hpo_optimizer/mfse_optimizer.py
|
williamy1996/Autoexpression
|
b470d9ff67074c8b076abbc1dce359db9a36f921
|
[
"MIT"
] | null | null | null |
solnml/components/hpo_optimizer/mfse_optimizer.py
|
williamy1996/Autoexpression
|
b470d9ff67074c8b076abbc1dce359db9a36f921
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
from solnml.components.hpo_optimizer.base_optimizer import BaseHPOptimizer
from solnml.components.utils.mfse_utils.mfsebase import MfseBase
class MfseOptimizer(BaseHPOptimizer, MfseBase):
def __init__(self, evaluator, config_space, time_limit=None, evaluation_limit=None,
per_run_time_limit=600, per_run_mem_limit=1024, output_dir='./', trials_per_iter=1, seed=1,
R=81, eta=3, n_jobs=1):
BaseHPOptimizer.__init__(self, evaluator, config_space, seed)
MfseBase.__init__(self, eval_func=self.evaluator, config_space=self.config_space,
seed=seed, R=R, eta=eta, n_jobs=n_jobs)
self.time_limit = time_limit
self.evaluation_num_limit = evaluation_limit
self.trials_per_iter = trials_per_iter
self.per_run_time_limit = per_run_time_limit
self.per_run_mem_limit = per_run_mem_limit
def iterate(self, num_iter=1):
'''
Iterate a successive halving (SH) procedure (inner loop) of Hyperband.
:return:
'''
_start_time = time.time()
for _ in range(num_iter):
self._mfse_iterate(self.s_values[self.inner_iter_id])
self.inner_iter_id = (self.inner_iter_id + 1) % (self.s_max + 1)
iteration_cost = time.time() - _start_time
inc_idx = np.argmin(np.array(self.incumbent_perfs))
for idx in range(len(self.incumbent_perfs)):
self.eval_dict[(None, self.incumbent_configs[idx])] = -self.incumbent_perfs[idx]
self.incumbent_perf = -self.incumbent_perfs[inc_idx]
self.incumbent_config = self.incumbent_configs[inc_idx]
# incumbent_perf: the large the better
return self.incumbent_perf, iteration_cost, self.incumbent_config
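# --- Editor's illustrative sketch of the R/eta budget schedule used above ---
# Hyperband-style optimizers such as this one run successive halving rungs whose
# per-rung budgets grow geometrically by `eta` up to `R` (defaults R=81, eta=3).
# The standalone helper below only assumes those two parameters and is not part
# of the optimizer's API.
def budget_rungs(R=81, eta=3):
    rungs = [R]
    while rungs[-1] > 1:
        rungs.append(max(1, rungs[-1] // eta))
    return list(reversed(rungs))

# budget_rungs() -> [1, 3, 9, 27, 81]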
| 44.55
| 108
| 0.686869
|
80c6fad9de20ca309b48540e314f67099eb36d0e
| 2,592
|
py
|
Python
|
dragonfire/coref.py
|
ismlkrkmz/Dragonfire
|
7a5e22bd07ba9734d68fe76ce77d80164d47249e
|
[
"MIT"
] | 1,320
|
2017-06-20T21:47:35.000Z
|
2022-03-29T08:53:31.000Z
|
dragonfire/coref.py
|
ismlkrkmz/Dragonfire
|
7a5e22bd07ba9734d68fe76ce77d80164d47249e
|
[
"MIT"
] | 120
|
2017-06-21T13:16:40.000Z
|
2022-03-24T18:12:21.000Z
|
dragonfire/coref.py
|
ismlkrkmz/Dragonfire
|
7a5e22bd07ba9734d68fe76ce77d80164d47249e
|
[
"MIT"
] | 229
|
2017-06-21T05:38:43.000Z
|
2022-03-14T14:03:10.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: coref
:platform: Unix
:synopsis: the top-level submodule of Dragonfire that aims to create coreference-based dialogs.
.. moduleauthor:: Mehmet Mert Yıldıran <mert.yildiran@bil.omu.edu.tr>
"""
import itertools # Functions creating iterators for efficient looping
import neuralcoref # Fast Coreference Resolution in spaCy with Neural Networks
class NeuralCoref():
"""Class to provide corefference based dialogs.
"""
def __init__(self, nlp):
"""Initialization method of :class:`dragonfire.coref.NeuralCoref` class.
"""
self.nlp = nlp
neuralcoref.add_to_pipe(self.nlp)
self.coms = []
def core(self, doc, n_sents):
"""Core resolution
"""
resolution = doc._.coref_resolved
chained = self.nlp(resolution)
total_sents = sum(1 for sent in chained.sents)
sents = itertools.islice(chained.sents, total_sents - n_sents, None)
sents_arr = []
for sent in sents:
sents_arr.append(sent.text)
return " ".join(sents_arr)
def resolve(self, com):
"""Method to return the version of command where each corefering mention is replaced by the main mention in the associated cluster (compared to previous commands).
Args:
com (str): User's command.
Returns:
str: Resolution.
"""
com_doc = self.nlp(com)
n_sents = sum(1 for sent in com_doc.sents)
token = None
for token in com_doc:
pass
if token.tag_ not in [',', ':', "."]:
com += '.'
self.coms.append(com)
if len(self.coms) > 1:
chain = " ".join(self.coms[-2:])
doc = self.nlp(chain)
if doc._.has_coref:
return self.core(doc, n_sents)
return com
return com
def resolve_api(self, com, previous=None):
if not previous:
return com
com_doc = self.nlp(com)
n_sents = sum(1 for sent in com_doc.sents)
token = None
for token in com_doc:
pass
if token.tag_ not in [',', ':', "."]:
com += '.'
previous_doc = self.nlp(previous)
token = None
for token in previous_doc:
pass
if token.tag_ not in [',', ':', "."]:
previous += '.'
chain = previous + " " + com
doc = self.nlp(chain)
if doc._.has_coref:
return self.core(doc, n_sents)
return com
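# --- Editor's illustrative usage sketch (assumes spaCy plus an installed English
# model such as 'en_core_web_sm'; neuralcoref must be importable, as above) ---
if __name__ == '__main__':
    import spacy
    nlp = spacy.load('en_core_web_sm')  # assumption: this model name is available locally
    coref = NeuralCoref(nlp)
    print(coref.resolve("My sister has a dog."))
    # Roughly expected once the pronouns are resolved: "My sister loves a dog."
    print(coref.resolve("She loves him."))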
| 26.44898
| 171
| 0.560957
|
3a2f693efe954cf0858763a51db6ea7c39b20490
| 7,583
|
py
|
Python
|
src/probnum/filtsmooth/_gaussfiltsmooth/_unscentedtransform.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | 226
|
2019-11-01T09:44:09.000Z
|
2022-03-30T23:17:17.000Z
|
src/probnum/filtsmooth/_gaussfiltsmooth/_unscentedtransform.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | 590
|
2019-11-21T08:32:30.000Z
|
2022-03-31T12:37:37.000Z
|
src/probnum/filtsmooth/_gaussfiltsmooth/_unscentedtransform.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | 39
|
2020-01-13T16:29:45.000Z
|
2022-03-28T16:16:54.000Z
|
"""Unscented Transform."""
import numpy as np
class UnscentedTransform:
"""Used for unscented Kalman filter.
See also p. 7 ("Unscented transform:") of [1]_.
Parameters
----------
dimension : int
Spatial dimensionality
spread : float
Spread of the sigma points around mean
priorpar : float
Incorporate prior knowledge about distribution of x.
For Gaussians, 2.0 is optimal (see link below)
special_scale : float
Secondary scaling parameter.
The primary parameter is computed below.
References
----------
.. [1] Wan, E. A. and van der Merwe, R., The Unscented Kalman Filter,
http://read.pudn.com/downloads135/ebook/574389/wan01unscented.pdf
"""
def __init__(self, dimension, spread=1e-4, priorpar=2.0, special_scale=0.0):
self.scale = _compute_scale(dimension, spread, special_scale)
self.dimension = dimension
self.mweights, self.cweights = _unscented_weights(
spread, priorpar, self.dimension, self.scale
)
def sigma_points(self, rv):
"""Sigma points.
Parameters
----------
rv :
Gaussian random variable exposing ``mean`` (shape (d,)) and
``cov_cholesky`` (shape (d, d)), the Cholesky factor of its covariance
Returns
-------
np.ndarray, shape (2 * d + 1, d)
"""
if len(rv.mean) != self.dimension:
raise ValueError("Dimensionality does not match UT")
sigpts = np.zeros((2 * self.dimension + 1, self.dimension))
sqrtcovar = rv.cov_cholesky
sigpts[0] = rv.mean.copy()
for idx in range(self.dimension):
sigpts[idx + 1] = (
rv.mean + np.sqrt(self.dimension + self.scale) * sqrtcovar[:, idx]
)
sigpts[self.dimension + 1 + idx] = (
rv.mean - np.sqrt(self.dimension + self.scale) * sqrtcovar[:, idx]
)
return sigpts
def propagate(self, time, sigmapts, modelfct):
"""Propagate sigma points.
Parameters
----------
time : float
Time :math:`t` which is passed on to the modelfunction.
sigmapts : np.ndarray, shape=(2 N+1, N)
Sigma points (N is the spatial dimension of the dynamic model)
modelfct : callable, signature=(t, x, \\**kwargs)
Function through which to propagate
Returns
-------
np.ndarray, shape=(2 N + 1, M),
M is the dimension of the measurement model
"""
propsigpts = np.array([modelfct(time, pt) for pt in sigmapts])
return propsigpts
def estimate_statistics(self, proppts, sigpts, covmat, mpred):
"""Computes predicted summary statistics, predicted
mean/covariance/cross-covariance, from (propagated) sigma points.
Not to be confused with the mean and covariance resulting from the
prediction step of the Bayesian filter. Hence we call it
"estimate_*" instead of "predict_*".
"""
estmean = _estimate_mean(self.mweights, proppts)
estcovar = _estimate_covar(self.cweights, proppts, estmean, covmat)
estcrosscovar = _estimate_crosscovar(
self.cweights, proppts, estmean, sigpts, mpred
)
return estmean, estcovar, estcrosscovar
def _compute_scale(dimension, spread, special_scale):
"""See BFaS; p. 83.
Parameters
----------
dimension: int
Spatial dimensionality of state space model
spread: float
Spread of sigma points around mean (1; alpha)
special_scale: float
Spread of sigma points around mean (2; kappa)
Returns
-------
float
Scaling parameter for unscented transform
"""
return spread ** 2 * (dimension + special_scale) - dimension
def _unscented_weights(spread, priorpar, dimension, scale):
"""See BFaS; p. 84.
Parameters
----------
spread: float
Spread of sigma points around mean (alpha)
priorpar: float
Prior information parameter (beta)
dimension : int
Dimension of the state space
scale : float
Scaling parameter for unscented transform
Returns
-------
np.ndarray, shape (2 * dimension + 1,)
constant mean weights.
np.ndarray, shape (2 * dimension + 1,)
constant covariance weights.
"""
mweights = _meanweights(dimension, scale)
cweights = _covarweights(dimension, spread, priorpar, scale)
return mweights, cweights
def _meanweights(dimension, lam):
"""Mean weights.
Parameters
----------
dimension: int
Spatial dimensionality of state space model
lam: float
Scaling parameter for unscented transform (lambda)
Returns
-------
np.ndarray, shape (2*dimension+1,)
Constant mean weights.
"""
mw0 = np.ones(1) * lam / (dimension + lam)
mw = np.ones(2 * dimension) / (2.0 * (dimension + lam))
return np.hstack((mw0, mw))
def _covarweights(dimension, alp, bet, lam):
"""Covariance weights.
Parameters
----------
dimension: int
Spatial dimensionality of state space model
alp: float
Spread of sigma points around mean (alpha)
bet: float
Prior information parameter (beta)
lam: float
Scaling parameter for unscented transform (lambda)
Returns
-------
np.ndarray, shape (2 * dimension + 1,)
the constant covariance weights.
"""
cw0 = np.ones(1) * lam / (dimension + lam) + (1 - alp ** 2 + bet)
cw = np.ones(2 * dimension) / (2.0 * (dimension + lam))
return np.hstack((cw0, cw))
def _estimate_mean(mweights, proppts):
"""See BFaS; p. 88.
Parameters
----------
mweights: np.ndarray, shape (2*dimension + 1,)
Constant mean weights for unscented transform.
proppts: np.ndarray, shape (2*dimension + 1, dimension)
Propagated sigma points
Returns
-------
np.ndarray, shape (dimension,)
Estimated mean.
"""
return mweights @ proppts
def _estimate_covar(cweights, proppts, mean, covmat):
"""See BFaS; p. 88.
Parameters
----------
cweights: np.ndarray, shape (2*dimension + 1,)
Constant covariance weights for the unscented transform.
proppts: np.ndarray, shape (2*dimension + 1, dimension)
Propagated sigma points
mean: np.ndarray, shape (dimension,)
Result of _estimate_mean(...)
covmat: np.ndarray, shape (dimension, dimension)
Covariance of measurement model at current time.
Returns
-------
np.ndarray, shape (dimension, dimension)
Estimated covariance.
"""
cent = proppts - mean
empcov = cent.T @ (cweights * cent.T).T
return empcov + covmat
def _estimate_crosscovar(cweights, proppts, mean, sigpts, mpred):
"""See BFaS; p.88.
Parameters
----------
cweights: np.ndarray, shape (2*dimension + 1,)
Constant covariance weights for the unscented transform.
sigpts: np.ndarray, shape (2*dimension + 1, dimension)
Sigma points
mpred: np.ndarray, shape (dimension,)
Predicted mean
proppts: np.ndarray, shape (2*dimension + 1, dimension)
Propagated sigma points
mean: np.ndarray, shape (dimension,)
Result of _estimate_mean(...)
Returns
-------
np.ndarray, shape (dimension,)
Estimated cross-covariance.
"""
cent_prop = proppts - mean
cent_sig = sigpts - mpred
empcrosscov = cent_sig.T @ (cweights * cent_prop.T).T
return empcrosscov
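# --- Editor's illustrative usage sketch; the _DiagonalRV stand-in below is an
# assumption standing in for ProbNum's own random variables, which expose the
# .mean and .cov_cholesky attributes used by sigma_points() above. ---
class _DiagonalRV:
    """Minimal stand-in random variable with the two attributes used above."""
    def __init__(self, mean, stds):
        self.mean = np.asarray(mean, dtype=float)
        self.cov_cholesky = np.diag(stds)  # Cholesky factor of a diagonal covariance

if __name__ == "__main__":
    ut = UnscentedTransform(dimension=2)
    rv = _DiagonalRV(mean=[0.0, 1.0], stds=[0.5, 2.0])
    sigpts = ut.sigma_points(rv)                               # shape (5, 2)
    proppts = ut.propagate(0.0, sigpts, lambda t, x: 2.0 * x)  # push through x -> 2x
    mean, cov, crosscov = ut.estimate_statistics(
        proppts, sigpts, covmat=np.eye(2), mpred=rv.mean)
    print(mean)  # approximately [0., 2.] for this linear map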
| 29.277992
| 82
| 0.60662
|
e6aa8d1f64c82d8deaee1350382b8abe74c18ee0
| 5,929
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/networkrepository/cl100001d8trial3.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/cl100001d8trial3.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/cl100001d8trial3.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph CL-10000-1d8-trial3.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 10:40:02.258089
The undirected graph CL-10000-1d8-trial3 has 9251 nodes and 43811 unweighted
edges, of which 21 are self-loops. The graph is sparse as it has a density
of 0.00102 and has 11 connected components, where the component with most
nodes has 9231 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 3, the mean node degree is 9.47, and the
node degree mode is 2. The top 5 most central nodes are 6114 (degree 212),
6730 (degree 203), 2448 (degree 194), 531 (degree 193) and 3534 (degree
190).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Cl100001d8Trial3
# Then load the graph
graph = Cl100001d8Trial3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def Cl100001d8Trial3(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the CL-10000-1d8-trial3 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of CL-10000-1d8-trial3 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 10:40:02.258089
The undirected graph CL-10000-1d8-trial3 has 9251 nodes and 43811 unweighted
edges, of which 21 are self-loops. The graph is sparse as it has a density
of 0.00102 and has 11 connected components, where the component with most
nodes has 9231 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 3, the mean node degree is 9.47, and the
node degree mode is 2. The top 5 most central nodes are 6114 (degree 212),
6730 (degree 203), 2448 (degree 194), 531 (degree 193) and 3534 (degree
190).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Cl100001d8Trial3
# Then load the graph
graph = Cl100001d8Trial3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="Cl100001d8Trial3",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 32.398907
| 94
| 0.690336
|
f0f12fe253ed767986ddf45c429e1ddd481cfbdd
| 10,323
|
py
|
Python
|
ludwig/data/dataset/ray.py
|
Playfloor/ludwig
|
a5339b988fd28fd611cdaa08b64f4c6ff0e3f229
|
[
"Apache-2.0"
] | null | null | null |
ludwig/data/dataset/ray.py
|
Playfloor/ludwig
|
a5339b988fd28fd611cdaa08b64f4c6ff0e3f229
|
[
"Apache-2.0"
] | null | null | null |
ludwig/data/dataset/ray.py
|
Playfloor/ludwig
|
a5339b988fd28fd611cdaa08b64f4c6ff0e3f229
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import math
import queue
import threading
from distutils.version import LooseVersion
from functools import lru_cache
from typing import Any, Dict, Iterator, Union
import numpy as np
import pandas as pd
import ray
from ray.data import from_dask, read_parquet
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.extensions import TensorDtype
from ludwig.constants import BINARY, CATEGORY, NAME, NUMBER, TYPE
from ludwig.data.batcher.base import Batcher
from ludwig.data.dataset.base import Dataset, DatasetManager
from ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP
from ludwig.utils.misc_utils import get_proc_features
from ludwig.utils.types import DataFrame
_ray18 = LooseVersion(ray.__version__) >= LooseVersion("1.8")
_SCALAR_TYPES = {BINARY, CATEGORY, NUMBER}
class RayDataset(Dataset):
"""Wrapper around ray.data.Dataset."""
def __init__(self, df: Union[str, DataFrame], features: Dict[str, Dict], training_set_metadata: Dict[str, Any]):
self.ds = from_dask(df) if not isinstance(df, str) else read_parquet(df)
self.features = features
self.training_set_metadata = training_set_metadata
self.data_hdf5_fp = training_set_metadata.get(DATA_TRAIN_HDF5_FP)
# TODO ray 1.8: convert to Tensors before shuffle
# def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
# for c in features.keys():
# df[c] = df[c].astype(TensorDtype())
# return df
# self.ds = self.ds.map_batches(to_tensors, batch_format="pandas")
def pipeline(self, shuffle=True) -> DatasetPipeline:
pipe = self.ds.repeat()
if shuffle:
if _ray18:
pipe = pipe.random_shuffle_each_window()
else:
pipe = pipe.random_shuffle()
return pipe
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.ds.repeat().iter_datasets(),
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
def __len__(self):
return self.ds.count()
@property
def size(self):
return len(self)
class RayDatasetManager(DatasetManager):
def __init__(self, backend):
self.backend = backend
def create(self, dataset: Union[str, DataFrame], config: Dict[str, Any], training_set_metadata: Dict[str, Any]):
return RayDataset(dataset, get_proc_features(config), training_set_metadata)
def save(
self,
cache_path: str,
dataset: DataFrame,
config: Dict[str, Any],
training_set_metadata: Dict[str, Any],
tag: str,
):
self.backend.df_engine.to_parquet(dataset, cache_path)
return cache_path
def can_cache(self, skip_save_processed_input):
return not skip_save_processed_input
@property
def data_format(self):
return "parquet"
class RayDatasetShard(Dataset):
def __init__(
self,
dataset_shard: DatasetPipeline,
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
):
self.dataset_shard = dataset_shard
self.features = features
self.training_set_metadata = training_set_metadata
self.dataset_iter = dataset_shard.iter_datasets()
@contextlib.contextmanager
def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
yield RayDatasetBatcher(
self.dataset_iter,
self.features,
self.training_set_metadata,
batch_size,
self.size,
)
@lru_cache(1)
def __len__(self):
# TODO(travis): find way to avoid calling this, as it's expensive
return next(self.dataset_iter).count()
@property
def size(self):
return len(self)
class RayDatasetBatcher(Batcher):
def __init__(
self,
dataset_epoch_iterator: Iterator[ray.data.Dataset],
features: Dict[str, Dict],
training_set_metadata: Dict[str, Any],
batch_size: int,
samples_per_epoch: int,
):
self.dataset_epoch_iterator = dataset_epoch_iterator
self.batch_size = batch_size
self.samples_per_epoch = samples_per_epoch
self.training_set_metadata = training_set_metadata
self.features = features
self.columns = list(features.keys())
self.reshape_map = {
proc_column: training_set_metadata[feature[NAME]].get("reshape")
for proc_column, feature in features.items()
}
self.dataset_batch_iter = None
self._epoch = 0
self._next_batch = None
self._last_batch = False
self._step = 0
self._fetch_next_epoch()
def next_batch(self):
if self.last_batch():
raise StopIteration()
batch = self._next_batch
self._fetch_next_batch()
self._step += 1
return batch
def last_batch(self):
return self._last_batch
def set_epoch(self, epoch, batch_size):
self.batch_size = batch_size
if epoch != self._epoch:
self._fetch_next_epoch()
self._epoch = epoch
@property
def step(self):
return self._step
@property
def steps_per_epoch(self):
return math.ceil(self.samples_per_epoch / self.batch_size)
def _fetch_next_epoch(self):
dataset = next(self.dataset_epoch_iterator)
read_parallelism = 1
if read_parallelism == 1:
self.dataset_batch_iter = self._create_async_reader(dataset)
elif read_parallelism > 1:
self.dataset_batch_iter = self._create_async_parallel_reader(dataset, read_parallelism)
else:
# TODO: consider removing this. doesn't work currently and read performance seems generally
# very good with 1 parallelism
self.dataset_batch_iter = self._create_sync_reader(dataset)
self._step = 0
self._fetch_next_batch()
def _fetch_next_batch(self):
if self.dataset_batch_iter is None:
self._last_batch = True
return
self._last_batch = False
try:
self._next_batch = next(self.dataset_batch_iter)
except StopIteration:
self._last_batch = True
def _to_tensors_fn(self):
columns = self.columns
features = self.features
def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
for c in columns:
# do not convert scalar columns: https://github.com/ray-project/ray/issues/20825
if features[c][TYPE] not in _SCALAR_TYPES:
df[c] = df[c].astype(TensorDtype())
return df
return to_tensors
def _prepare_batch(self, batch: pd.DataFrame) -> Dict[str, np.ndarray]:
res = {c: batch[c].to_numpy() for c in self.columns}
for c in self.columns:
reshape = self.reshape_map.get(c)
if reshape is not None:
res[c] = res[c].reshape((-1, *reshape))
return res
def _create_sync_reader(self, dataset: ray.data.Dataset):
to_tensors = self._to_tensors_fn()
def sync_read():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=self.batch_size, batch_format="pandas"
):
yield self._prepare_batch(batch)
return sync_read()
def _create_async_reader(self, dataset: ray.data.Dataset):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
def producer():
for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
prefetch_blocks=0, batch_size=batch_size, batch_format="pandas"
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_read():
t = threading.Thread(target=producer)
t.start()
while True:
batch = q.get(block=True)
if batch is None:
break
yield batch
t.join()
return async_read()
def _create_async_parallel_reader(self, dataset: ray.data.Dataset, num_threads: int):
q = queue.Queue(maxsize=100)
batch_size = self.batch_size
to_tensors = self._to_tensors_fn()
splits = dataset.split(n=num_threads)
def producer(i):
for batch in (
splits[i]
.map_batches(to_tensors, batch_format="pandas")
.iter_batches(prefetch_blocks=0, batch_size=batch_size, batch_format="pandas")
):
res = self._prepare_batch(batch)
q.put(res)
q.put(None)
def async_parallel_read():
threads = [threading.Thread(target=producer, args=(i,)) for i in range(num_threads)]
for t in threads:
t.start()
active_threads = num_threads
while True:
batch = q.get(block=True)
if batch is None:
active_threads -= 1
if active_threads == 0:
break
yield batch
for t in threads:
t.join()
return async_parallel_read()
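# --- Illustrative sketch, not part of the original module ---
# A minimal, self-contained version of the producer/consumer pattern that
# RayDatasetBatcher._create_async_reader uses above: a background thread pushes
# prepared batches into a bounded queue while the consumer yields them, so
# block reading overlaps with consumption. The names iter_batches_async and
# iter_source are hypothetical and exist only for this sketch.
import queue
import threading
def iter_batches_async(iter_source, maxsize=100):
    q = queue.Queue(maxsize=maxsize)
    def producer():
        # push every batch from the source, then a sentinel marking the end
        for batch in iter_source:
            q.put(batch)
        q.put(None)
    t = threading.Thread(target=producer)
    t.start()
    while True:
        batch = q.get(block=True)
        if batch is None:
            break
        yield batch
    t.join()
# Example usage: list(iter_batches_async(range(3))) == [0, 1, 2]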
| 32.158879
| 116
| 0.622881
|
ee324ab30d9f714444d06ce3517f12521adbed08
| 437
|
py
|
Python
|
tests/test_deprecation.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | 7
|
2020-03-23T08:30:29.000Z
|
2020-12-05T14:51:49.000Z
|
tests/test_deprecation.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_deprecation.py
|
Roynecro97/easypy
|
9f36732b558477557b8a57cfad2840767eff0d12
|
[
"BSD-3-Clause"
] | 6
|
2020-04-28T12:20:14.000Z
|
2022-02-15T15:01:42.000Z
|
import pytest
from easypy.deprecation import deprecated_arguments
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_deprecated_arguments():
@deprecated_arguments(foo='bar')
def func(bar):
return 'bar is %s' % (bar,)
assert func(1) == func(foo=1) == func(bar=1) == 'bar is 1'
with pytest.raises(TypeError):
func(foo=1, bar=2)
with pytest.raises(TypeError):
func(1, foo=2)
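# --- Illustrative sketch, not part of the original test file ---
# The test above exercises easypy's deprecated_arguments decorator, which maps
# a deprecated keyword name onto its replacement and rejects calls that supply
# both names at once. The stand-in below is only a sketch of that behaviour
# (the name deprecated_arguments_sketch is hypothetical); easypy's actual
# implementation may differ.
import warnings
from functools import wraps
def deprecated_arguments_sketch(**renames):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for old, new in renames.items():
                if old in kwargs:
                    if new in kwargs:
                        # both the old and the new name were supplied: ambiguous call
                        raise TypeError("%s() got both %r and its deprecated alias %r" % (func.__name__, new, old))
                    warnings.warn("%r is deprecated, use %r" % (old, new), DeprecationWarning)
                    kwargs[new] = kwargs.pop(old)
            return func(*args, **kwargs)
        return wrapper
    return decorator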
| 24.277778
| 62
| 0.663616
|
086dee3924246ece52b68b95827638fcfea4405d
| 77,922
|
py
|
Python
|
qatrack/qa/tests/test_models.py
|
crcrewso/qatrackplus
|
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
|
[
"MIT"
] | 20
|
2021-03-11T18:37:32.000Z
|
2022-03-23T19:38:07.000Z
|
qatrack/qa/tests/test_models.py
|
crcrewso/qatrackplus
|
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
|
[
"MIT"
] | 75
|
2021-02-12T02:37:33.000Z
|
2022-03-29T20:56:16.000Z
|
qatrack/qa/tests/test_models.py
|
crcrewso/qatrackplus
|
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
|
[
"MIT"
] | 5
|
2021-04-07T15:46:53.000Z
|
2021-09-18T16:55:00.000Z
|
from unittest import mock
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils import timezone
from django_comments.models import Comment
import pytest
from qatrack.qa import models
from qatrack.qatrack_core import scheduling
from . import utils
def utc_2am():
return timezone.make_aware(timezone.datetime(2014, 4, 2, 2), timezone.utc)
class TestFrequencyManager(TestCase):
def test_choices(self):
intervals = (
("Daily", "daily", 1, 1, 0),
("Weekly", "weekly", 7, 7, 2),
("Monthly", "monthly", 28, 28, 7),
)
for t, s, nom, due, overdue in intervals:
utils.create_frequency(name=t, slug=s, interval=due, window_end=overdue)
self.assertEqual([(x[1], x[0]) for x in intervals], list(models.Frequency.objects.frequency_choices()))
def test_by_natural_key(self):
utils.create_frequency(name="Daily", slug="daily")
f = models.Frequency.objects.get_by_natural_key('daily')
assert f.name == 'Daily'
class TestFrequency(TestCase):
def test_nominal_interval_set(self):
intervals = (
("Daily", "daily", 1, 1, 0),
("Weekly", "weekly", 7, 7, 2),
("Monthly", "monthly", 28, 28, 7),
)
for t, s, nom, due, overdue in intervals:
f = utils.create_frequency(name=t, slug=s, interval=due, window_end=overdue)
assert 1 <= round(f.nominal_interval) <= round(nom)
def test_natural_key(self):
assert models.Frequency(slug="daily").natural_key() == ("daily",)
def test_classical(self):
assert models.Frequency(window_start=None).classical
class TestStatus(TestCase):
def test_save_without_default(self):
"""If there's only one status type force it to be default on save"""
self.assertIsNone(models.TestInstanceStatus.objects.default())
status = models.TestInstanceStatus(
name="foo",
slug="foo",
is_default=False,
)
status.save()
self.assertEqual(status, models.TestInstanceStatus.objects.default())
def test_new_default(self):
status = models.TestInstanceStatus(
name="foo",
slug="foo",
is_default=True,
)
status.save()
new_status = models.TestInstanceStatus(
name="bar",
slug="bar",
is_default=True,
)
new_status.save()
defaults = models.TestInstanceStatus.objects.filter(is_default=True)
self.assertEqual(list(defaults), [new_status])
def test_get_by_natural_key(self):
new_status = models.TestInstanceStatus(
name="bar",
slug="bar",
is_default=True,
)
new_status.save()
assert models.TestInstanceStatus.objects.get_by_natural_key("bar").name == "bar"
def test_natural_key(self):
new_status = models.TestInstanceStatus(
name="bar",
slug="bar",
is_default=True,
)
new_status.save()
assert new_status.natural_key() == (new_status.slug,)
class TestReference(TestCase):
def test_invalid_value(self):
u = utils.create_user()
r = models.Reference(name="bool", type=models.BOOLEAN, value=3, created_by=u, modified_by=u)
self.assertRaises(ValidationError, r.clean_fields)
def test_display_value(self):
t = models.Reference(type=models.BOOLEAN, value=1)
f = models.Reference(type=models.BOOLEAN, value=0)
v = models.Reference(type=models.NUMERICAL, value=0)
n = models.Reference(type=models.NUMERICAL)
self.assertTrue(t.value_display() == "Yes")
self.assertTrue(f.value_display() == "No")
self.assertTrue(v.value_display() == "0")
self.assertTrue(n.value_display() == "")
class TestTolerance(TestCase):
def test_pass_choices(self):
t = models.Tolerance(mc_pass_choices="a,b,c")
self.assertListEqual(["a", "b", "c"], t.pass_choices())
def test_tol_choices(self):
t = models.Tolerance(mc_tol_choices="a,b,c")
self.assertListEqual(["a", "b", "c"], t.tol_choices())
def test_no_pass_vals(self):
t = models.Tolerance(mc_pass_choices=" ", type=models.MULTIPLE_CHOICE)
self.assertRaises(ValidationError, t.clean_choices)
def test_act_set(self):
t = models.Tolerance(mc_pass_choices="", act_high=1, type=models.MULTIPLE_CHOICE)
self.assertRaises(ValidationError, t.clean_choices)
def test_pass_is_none(self):
t = models.Tolerance(type=models.MULTIPLE_CHOICE)
self.assertRaises(ValidationError, t.clean_choices)
def test_with_tol_choices(self):
t = models.Tolerance(mc_pass_choices="a", mc_tol_choices=" ", type=models.MULTIPLE_CHOICE)
t.clean_choices()
def test_ok_mc(self):
t = models.Tolerance(mc_pass_choices="a", mc_tol_choices="b", type=models.MULTIPLE_CHOICE)
t.clean_fields()
self.assertListEqual(t.tol_choices(), ["b"])
self.assertListEqual(t.pass_choices(), ["a"])
def test_without_act(self):
t = models.Tolerance(type=models.ABSOLUTE)
self.assertRaises(ValidationError, t.clean_tols)
def test_invalid_mc_choices(self):
t = models.Tolerance(mc_pass_choices="a", type=models.ABSOLUTE)
self.assertRaises(ValidationError, t.clean_choices)
t = models.Tolerance(mc_tol_choices="a", type=models.ABSOLUTE)
self.assertRaises(ValidationError, t.clean_choices)
def test_no_pass_choices(self):
t = models.Tolerance(mc_pass_choices="", type=models.MULTIPLE_CHOICE)
self.assertRaises(ValidationError, t.clean_choices)
def test_no_tol_choices(self):
t = models.Tolerance(mc_pass_choices="a", mc_tol_choices="", type=models.MULTIPLE_CHOICE)
t.clean_choices()
t = models.Tolerance(mc_pass_choices="a", type=models.MULTIPLE_CHOICE)
t.clean_choices()
def test_tolerances_for_value_none(self):
expected = {models.ACT_HIGH: None, models.ACT_LOW: None, models.TOL_LOW: None, models.TOL_HIGH: None}
t = models.Tolerance()
self.assertDictEqual(t.tolerances_for_value(None), expected)
def test_tolerances_for_value_absolute(self):
expected = {models.ACT_HIGH: 55, models.ACT_LOW: 51, models.TOL_LOW: 52, models.TOL_HIGH: 54}
t = utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.ABSOLUTE)
self.assertDictEqual(expected, t.tolerances_for_value(53))
def test_tolerances_for_value_percent(self):
expected = {models.ACT_HIGH: 1.02, models.ACT_LOW: 0.98, models.TOL_LOW: 0.99, models.TOL_HIGH: 1.01}
t = utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.PERCENT)
self.assertDictEqual(expected, t.tolerances_for_value(1))
def test_percent_string_rep(self):
t = utils.create_tolerance(act_high=None, act_low=-2, tol_high=1, tol_low=None, tol_type=models.PERCENT)
self.assertEqual(t.name, "Percent(-2.00%, --, 1.00%, --)")
def test_absolute_string_rep(self):
t = utils.create_tolerance(act_high=None, act_low=-2, tol_high=1, tol_low=None, tol_type=models.ABSOLUTE)
self.assertEqual(t.name, "Absolute(-2.000, --, 1.000, --)")
def test_mc_string_rep(self):
t = utils.create_tolerance(mc_pass_choices="a,b,c", mc_tol_choices="d,e", tol_type=models.MULTIPLE_CHOICE)
expected = "M.C.(%s=a:b:c, %s=d:e)" % (
settings.TEST_STATUS_DISPLAY['ok'], settings.TEST_STATUS_DISPLAY['tolerance']
)
assert t.name == expected
def test_no_duplicates(self):
utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.PERCENT)
with self.assertRaises(IntegrityError):
utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.PERCENT)
def test_get_by_natural_key(self):
t = utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.ABSOLUTE)
assert models.Tolerance.objects.get_by_natural_key(t.name).id == t.pk
def test_natural_key(self):
t = utils.create_tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, tol_type=models.ABSOLUTE)
assert t.natural_key() == (t.name,)
class TestTestCollectionInterface(TestCase):
def test_abstract_test_list_members(self):
self.assertRaises(NotImplementedError, models.TestCollectionInterface().test_list_members)
class TestTest(TestCase):
def create_test(self, **kwargs):
return models.Test(**kwargs)
def test_is_boolean(self):
test = self.create_test(name="bool", type=models.BOOLEAN)
assert test.is_boolean()
def test_is_date(self):
test = self.create_test(name="date", type=models.DATE)
assert test.is_date()
def test_is_datetime(self):
test = self.create_test(name="datetime", type=models.DATETIME)
assert test.is_datetime()
def test_is_string(self):
test = self.create_test(name="string", type=models.STRING)
assert test.is_string()
def test_is_string_composite(self):
test = self.create_test(name="stringcomp", type=models.STRING_COMPOSITE)
assert test.is_string_composite()
def test_is_upload(self):
test = self.create_test(name="upload", type=models.UPLOAD)
assert test.is_upload()
def test_can_attach(self):
for tt in (models.STRING_COMPOSITE, models.COMPOSITE, models.UPLOAD):
assert models.Test(type=tt).can_attach()
def test_is_numerical_type(self):
for t in (models.COMPOSITE, models.CONSTANT, models.SIMPLE):
test = self.create_test(name="num", type=t)
assert test.is_numerical_type()
def test_is_string_type(self):
for t in (models.STRING_COMPOSITE, models.STRING):
test = self.create_test(name="str", type=t)
assert test.is_string_type()
def test_is_date_type(self):
for t in (models.DATE, models.DATETIME):
test = self.create_test(name="date", type=t)
assert test.is_date_type()
def test_valid_check_type(self):
types = (
("choices", "foo, bar", models.MULTIPLE_CHOICE, "Multiple Choice"),
("constant_value", 1.0, models.CONSTANT, "Constant"),
("calculation_procedure", "result=foo", models.COMPOSITE, "Composite"),
)
for attr, val, ttype, display in types:
test = self.create_test(name=display, type=ttype)
setattr(test, attr, val)
test.check_test_type(getattr(test, attr), ttype, display)
def test_invalid_check_type(self):
types = (
("choices", "foo, bar", models.CONSTANT, "Invalid"),
("constant_value", 1., models.COMPOSITE, "Constant"),
("calculation_procedure", "result=foo", models.MULTIPLE_CHOICE, "Composite"),
("choices", None, models.MULTIPLE_CHOICE, "Multiple Choice"),
("constant_value", None, models.COMPOSITE, "Constant"),
("calculation_procedure", None, models.COMPOSITE, "Composite"),
)
for attr, val, ttype, display in types:
test = self.create_test(name=display, type=ttype)
setattr(test, attr, val)
type = ttype if val is None else models.SIMPLE
errors = test.check_test_type(getattr(test, attr), type, display)
assert len(errors) > 0
def test_clean_calc_proc_not_needed(self):
test = self.create_test(type=models.SIMPLE)
assert test.clean_calculation_procedure() is None
def test_invalid_clean_calculation_procedure(self):
test = self.create_test(type=models.COMPOSITE)
invalid_calc_procedures = (
"resul t = a + b",
"_result = a + b",
"0result = a+b",
"result_=foo",
"",
"foo = a +b",
"foo = __import__('bar')",
"result = (a+b",
)
for icp in invalid_calc_procedures:
test.calculation_procedure = icp
try:
msg = "Passed but should have failed:\n %s" % icp
test.clean_calculation_procedure()
except ValidationError:
msg = ""
assert len(msg) == 0, msg
def test_valid_calc_procedure(self):
test = self.create_test(type=models.COMPOSITE)
valid_calc_procedures = (
"result = a + b", "result = 42", """foo = a + b
result = foo + bar""", """foo = a + b
result = foo + bar
"""
)
for vcp in valid_calc_procedures:
test.calculation_procedure = vcp
try:
msg = ""
test.clean_calculation_procedure()
except ValidationError:
msg = "Failed but should have passed:\n %s" % vcp
assert len(msg) == 0, msg
def test_clean_constant_value(self):
test = self.create_test(type=models.CONSTANT)
with pytest.raises(ValidationError):
test.clean_constant_value()
test.constant_value = 1
assert test.clean_constant_value() is None
def test_clean_mult_choice_not_needed(self):
test = self.create_test(type=models.SIMPLE)
assert test.clean_choices() is None
def test_valid_mult_choice(self):
test = self.create_test(type=models.MULTIPLE_CHOICE)
valid = ("foo, bar, baz", "foo, bar, baz", "foo, \tbar")
for v in valid:
test.choices = v
test.clean_choices()
test.choices = valid[0]
test.clean_choices()
assert [("foo", "foo"), ("bar", "bar"), ("baz", "baz")] == test.get_choices()
def test_invalid_mult_choice(self):
test = self.create_test(type=models.MULTIPLE_CHOICE)
invalid = (
None,
"",
" ",
)
for i in invalid:
test.choices = i
with pytest.raises(ValidationError):
test.clean_choices()
def test_invalid_clean_slug(self):
test = self.create_test()
invalid = ("0 foo", "foo ", " foo" "foo bar", "foo*bar", "%foo", "foo$")
for i in invalid:
test.slug = i
try:
msg = "Short name should have failed but passed: %s" % i
test.clean_slug()
except ValidationError:
msg = ""
assert len(msg) == 0, msg
test.type = models.COMPOSITE
test.slug = ""
with pytest.raises(ValidationError):
test.clean_slug()
def test_valid_clean_slug(self):
test = self.create_test()
valid = ("foo", "f6oo", "foo6", "_foo", "foo_", "foo_bar")
for v in valid:
test.slug = v
try:
msg = ""
test.clean_slug()
except ValidationError:
msg = "Short name should have passed but failed: %s" % v
assert len(msg) == 0, msg
@pytest.mark.django_db
def test_clean_fields(self):
test = utils.create_test()
test.clean_fields()
def test_get_choices(self):
test = self.create_test(type=models.MULTIPLE_CHOICE)
test.choices = "a,b"
assert test.get_choices() == [("a", "a"), ("b", "b")]
def test_display(self):
assert models.Test(display_name="display", name="name").display() == "display"
assert models.Test(name="name").display() == "name"
class TestOnTestSaveSignal(TestCase):
def test_valid_bool_check(self):
ref = utils.create_reference(value=3)
uti = utils.create_unit_test_info(ref=ref)
uti.test.type = models.BOOLEAN
self.assertRaises(ValidationError, uti.test.save)
class TestUnitTestInfo(TestCase):
def setUp(self):
self.test = utils.create_test()
self.test_list = utils.create_test_list()
utils.create_test_list_membership(test=self.test, test_list=self.test_list)
self.utc = utils.create_unit_test_collection(test_collection=self.test_list)
self.uti = models.UnitTestInfo.objects.get(test=self.test, unit=self.utc.unit)
self.tli = utils.create_test_list_instance(unit_test_collection=self.utc)
def test_percentage_ref(self):
self.uti.reference = utils.create_reference(value=0)
self.uti.tolerance = utils.create_tolerance(tol_type=models.PERCENT)
self.assertRaises(ValidationError, self.uti.clean)
def test_boolean_ref(self):
self.uti.reference = utils.create_reference(value=3)
self.uti.test.type = models.BOOLEAN
self.assertRaises(ValidationError, self.uti.clean)
def test_history(self):
td = timezone.timedelta
now = timezone.now()
status = utils.create_status()
        # values purposely created out of order to make sure history
# returns in correct order (i.e. ordered by date)
history = [
(now + td(days=4), 5., models.NO_TOL, status),
(now + td(days=1), 5., models.NO_TOL, status),
(now + td(days=3), 6., models.NO_TOL, status),
(now + td(days=2), 7., models.NO_TOL, status),
]
for wc, val, _, _ in history:
utils.create_test_instance(self.tli, unit_test_info=self.uti, status=status, work_completed=wc, value=val)
sorted_hist = list(sorted([(x[0].replace(second=0, microsecond=0), x[1], x[2], x[3]) for x in history]))
uti_hist = [(x[0].replace(second=0, microsecond=0), x[1], x[2], x[3]) for x in self.uti.get_history()]
self.assertListEqual(sorted_hist, uti_hist)
# test returns correct number of results
limited = [(x[0].replace(second=0, microsecond=0), x[1], x[2], x[3]) for x in self.uti.get_history(number=2)]
self.assertListEqual(sorted_hist[-2:], limited)
def test_add_to_cycle(self):
models.UnitTestInfo.objects.all().delete()
tl1 = utils.create_test_list("tl1")
tl2 = utils.create_test_list("tl2")
t1 = utils.create_test("t1")
t2 = utils.create_test("t2")
utils.create_test_list_membership(tl1, t1)
utils.create_test_list_membership(tl2, t2)
cycle = utils.create_cycle(test_lists=[tl1, tl2])
utils.create_unit_test_collection(test_collection=cycle, unit=self.utc.unit, frequency=self.utc.frequency)
utis = models.UnitTestInfo.objects.all()
self.assertEqual(len(utis), 2)
t3 = utils.create_test("t3")
utils.create_test_list_membership(tl2, t3)
utis = models.UnitTestInfo.objects.all()
self.assertEqual(len(utis), 3)
def test_read_test(self):
utis = models.UnitTestInfo.objects.all()
self.assertEqual(utis.count(), 1)
utis.delete()
self.assertEqual(utis.count(), 0)
self.test_list.save()
self.assertEqual(models.UnitTestInfo.objects.count(), 1)
def test_active_only_simple(self):
utis = models.UnitTestInfo.objects.active()
self.assertEqual(utis.count(), 1)
self.utc.active = False
self.utc.save()
self.assertEqual(models.UnitTestInfo.objects.active().count(), 0)
def test_active_only_with_multiple_lists(self):
tl2 = utils.create_test_list("tl2")
t2 = utils.create_test("t2")
utils.create_test_list_membership(tl2, self.test)
utils.create_test_list_membership(tl2, t2)
utc2 = utils.create_unit_test_collection(test_collection=tl2, unit=self.utc.unit, frequency=self.utc.frequency)
utis = models.UnitTestInfo.objects.active()
# only 2 active because t1 and t2 shared between tl1 and tl2
self.assertEqual(utis.count(), 2)
# uti for t1 should stay active because it's present in utc2
self.utc.active = False
self.utc.save()
self.assertEqual(models.UnitTestInfo.objects.active().count(), 2)
utc2.active = False
utc2.save()
self.utc.active = True
self.utc.save()
self.assertEqual(models.UnitTestInfo.objects.active().count(), 1)
def test_active_only_with_cycle(self):
tl2 = utils.create_test_list("tl2")
t2 = utils.create_test("t2")
utils.create_test_list_membership(tl2, self.test)
utils.create_test_list_membership(tl2, t2)
utc2 = utils.create_unit_test_collection(unit=self.utc.unit, test_collection=tl2, frequency=self.utc.frequency)
tl3 = utils.create_test_list("tl3")
t3 = utils.create_test("t3")
utils.create_test_list_membership(tl3, self.test)
utils.create_test_list_membership(tl2, t3)
tlc = utils.create_cycle([tl2, tl3])
utc3 = utils.create_unit_test_collection(test_collection=tlc, unit=utc2.unit, frequency=utc2.frequency)
utis = models.UnitTestInfo.objects.active()
self.assertEqual(utis.count(), 3)
# uti for t1 should stay active because it's present in utc2
self.utc.active = False
self.utc.save()
# all still should be active since t1 is present in tl2
self.assertEqual(models.UnitTestInfo.objects.active().count(), 3)
utc2.active = False
utc2.save()
# all still should be active since tl2 is present in tlc
self.assertEqual(models.UnitTestInfo.objects.active().count(), 3)
self.utc.active = True
self.utc.save()
utc3.active = False
utc3.save()
# only utc1 is active now
self.assertEqual(models.UnitTestInfo.objects.active().count(), 1)
def test_inactive_only_simple(self):
utis = models.UnitTestInfo.objects.inactive()
self.assertEqual(utis.count(), 0)
self.utc.active = False
self.utc.save()
self.assertEqual(models.UnitTestInfo.objects.inactive().count(), 1)
class TestTestListMembership(TestCase):
def test_get_by_natural_key(self):
tlm = utils.create_test_list_membership()
assert models.TestListMembership.objects.get_by_natural_key(
tlm.test_list.slug,
tlm.test.name,
).id == tlm.id
class TestTestList(TestCase):
def test_get_list(self):
tl = models.TestList()
self.assertEqual((0, tl), tl.get_list())
def test_test_list_members(self):
tl = utils.create_test_list()
self.assertListEqual([tl], list(tl.test_list_members()))
def test_get_next_list(self):
tl = models.TestList()
self.assertEqual((0, tl), tl.next_list(None))
def test_first(self):
tl = models.TestList()
self.assertEqual(tl, tl.first())
def test_all_tests(self):
""""""
tl = utils.create_test_list()
tests = [utils.create_test(name="test %d" % i) for i in range(4)]
for order, test in enumerate(tests):
utils.create_test_list_membership(test_list=tl, test=test, order=order)
self.assertSetEqual(set(tests), set(tl.all_tests()))
def test_content_type(self):
tl = utils.create_test_list()
self.assertEqual(tl.content_type(), ContentType.objects.get(model="testlist"))
def test_all_lists(self):
tl1 = utils.create_test_list(name="1")
tl2 = utils.create_test_list(name="2")
models.Sublist.objects.create(parent=tl1, child=tl2, order=0)
self.assertSetEqual(set([tl1, tl2]), set(tl1.all_lists()))
def test_ordered_tests(self):
tl1 = utils.create_test_list(name="1")
tl2 = utils.create_test_list(name="2")
t1 = utils.create_test()
t2 = utils.create_test("test2")
utils.create_test_list_membership(test_list=tl1, test=t1)
utils.create_test_list_membership(test_list=tl2, test=t2)
models.Sublist.objects.create(parent=tl1, child=tl2, order=0)
self.assertListEqual(list(tl1.ordered_tests()), [t1, t2])
def test_ordered_tests_sublist(self):
tl1 = utils.create_test_list(name="1")
tl2 = utils.create_test_list(name="2")
tl3 = utils.create_test_list(name="3")
t1a = utils.create_test()
t1b = utils.create_test()
t2a = utils.create_test("test2a")
t2b = utils.create_test("test2b")
t3 = utils.create_test("test3")
utils.create_test_list_membership(test_list=tl1, test=t1a, order=0) # 0
utils.create_test_list_membership(test_list=tl1, test=t1b, order=4) # 4
utils.create_test_list_membership(test_list=tl2, test=t2a, order=1) # 2
utils.create_test_list_membership(test_list=tl2, test=t2b, order=0) # 1
utils.create_test_list_membership(test_list=tl3, test=t3, order=0) # 3
models.Sublist.objects.create(parent=tl1, child=tl2, order=1)
models.Sublist.objects.create(parent=tl1, child=tl3, order=3)
self.assertListEqual(list(tl1.ordered_tests()), [t1a, t2b, t2a, t3, t1b])
def test_len(self):
self.assertEqual(1, len(utils.create_test_list()))
class TestTestListCycle(TestCase):
def setUp(self):
super(TestTestListCycle, self).setUp()
daily = utils.create_frequency(interval=1, window_end=0)
utils.create_status()
self.empty_cycle = utils.create_cycle(name="empty")
self.empty_utc = utils.create_unit_test_collection(test_collection=self.empty_cycle, frequency=daily)
self.test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
self.tests = []
for i, test_list in enumerate(self.test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
self.tests.append(test)
self.cycle = utils.create_cycle(test_lists=self.test_lists)
self.utc = utils.create_unit_test_collection(
test_collection=self.cycle, frequency=daily, unit=self.empty_utc.unit
)
def test_get_list(self):
for day, test_list in enumerate(self.test_lists):
self.assertEqual((day, test_list), self.cycle.get_list(day))
self.assertEqual((None, None), self.empty_cycle.get_list())
def test_cycle_test_list_members(self):
self.assertListEqual(self.test_lists, list(self.cycle.test_list_members()))
def test_get_next_list(self):
next_ = self.cycle.next_list(0)
self.assertEqual((1, self.test_lists[1]), next_)
next_ = self.cycle.next_list(1)
self.assertEqual((0, self.cycle.first()), next_)
self.assertEqual((None, None), self.empty_cycle.next_list(None))
def test_first(self):
self.assertEqual(self.cycle.first(), self.test_lists[0])
self.assertFalse(self.empty_cycle.first())
def test_all_tests(self):
self.assertSetEqual(set(self.tests), set(self.cycle.all_tests()))
self.assertEqual(0, self.empty_cycle.all_tests().count())
def test_content_type(self):
tl = utils.create_test_list()
self.assertEqual(tl.content_type(), ContentType.objects.get(model="testlist"))
def test_all_lists(self):
self.assertSetEqual(set(self.test_lists), set(self.cycle.all_lists()))
self.assertFalse(self.empty_cycle.all_lists())
def test_len(self):
self.assertEqual(0, len(models.TestListCycle()))
self.assertEqual(2, len(self.cycle))
self.assertEqual(0, len(self.empty_cycle))
def test_update_last_instance(self):
"""
When a test list instance is created for a test list that is part of more than one cycle
        assigned to a unit, it only updates the last_instance attribute of the UTC for which it was
        performed.
        i.e. imagine a unit has two cycles assigned to it, C1 (UTC1) & C2
        (UTC2), and both contain test list TL. Completing TL as part of UTC1
should not update the last_instance attribute of UTC2.
"""
        cycle2 = utils.create_cycle(name="cycle2", test_lists=self.test_lists)
utc2 = utils.create_unit_test_collection(test_collection=cycle2, unit=self.utc.unit)
assert self.utc.last_instance is None
tli = utils.create_test_list_instance(unit_test_collection=utc2, work_completed=timezone.now(), day=0)
self.utc.refresh_from_db()
assert self.utc.last_instance is None
utc2.refresh_from_db()
assert utc2.last_instance.pk == tli.pk
class TestUTCDueDates(TestCase):
def setUp(self):
test = utils.create_test()
test_list = utils.create_test_list()
utils.create_test_list_membership(test=test, test_list=test_list)
self.valid_status = models.TestInstanceStatus(
name="valid",
slug="valid",
is_default=True,
requires_review=True,
valid=True,
)
self.valid_status.save()
self.invalid_status = models.TestInstanceStatus(
name="invalid",
slug="invalid",
is_default=False,
requires_review=False,
valid=False,
)
self.invalid_status.save()
self.daily = utils.create_frequency(name="daily", slug="daily", interval=1, window_end=0)
self.monthly = utils.create_frequency(name="monthly", slug="monthly", interval=28, window_end=7)
self.utc_hist = utils.create_unit_test_collection(test_collection=test_list, frequency=self.daily)
self.uti_hist = models.UnitTestInfo.objects.get(test=test, unit=self.utc_hist.unit)
def test_no_history(self):
self.assertIsNone(self.utc_hist.due_date)
def test_basic(self):
# test case where utc is completed with valid status
now = timezone.now()
tli = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli, unit_test_info=self.uti_hist, status=self.valid_status)
self.utc_hist.refresh_from_db()
expected = now + timezone.timedelta(days=1)
assert self.utc_hist.due_date.date() == expected.date()
def test_invalid_without_history(self):
# test case where utc has no history and is completed with invalid status
now = timezone.now()
tli = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli, unit_test_info=self.uti_hist, status=self.invalid_status)
tli.save()
self.utc_hist.refresh_from_db()
assert self.utc_hist.due_date.date() == now.date()
def test_modified_to_invalid(self):
# test case where utc with history was created with valid status and
# later changed to have invalid status
# first create valid history
now = timezone.now()
tli1 = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli1, unit_test_info=self.uti_hist, status=self.valid_status)
tli1.save()
self.utc_hist.refresh_from_db()
# now create 2nd valid history
orig_due_date = self.utc_hist.due_date
tli2 = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=orig_due_date)
ti2 = utils.create_test_instance(tli2, unit_test_info=self.uti_hist, status=self.valid_status)
tli2.save()
self.utc_hist.refresh_from_db()
self.utc_hist = models.UnitTestCollection.objects.get(pk=self.utc_hist.pk)
expected = orig_due_date + timezone.timedelta(days=1)
assert self.utc_hist.due_date.date() == expected.date()
        # now mark ti2 as invalid
ti2.status = self.invalid_status
ti2.save()
self.utc_hist.refresh_from_db()
self.utc_hist.set_due_date()
assert self.utc_hist.due_date.date() == orig_due_date.date()
def test_modified_to_valid(self):
# test case where test list was saved with invalid status and later
# updated to have valid status
# first create valid history
now = timezone.now()
tli1 = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli1, unit_test_info=self.uti_hist, status=self.valid_status)
tli1.save()
# now create 2nd with invalid status
now = timezone.now()
tli2 = utils.create_test_list_instance(
unit_test_collection=self.utc_hist, work_completed=now + timezone.timedelta(days=1)
)
ti2 = utils.create_test_instance(tli2, unit_test_info=self.uti_hist, status=self.invalid_status)
tli2.save()
# due date should be based on tli1 since tli2 is invalid
self.utc_hist = models.UnitTestCollection.objects.get(pk=self.utc_hist.pk)
self.assertEqual(self.utc_hist.due_date.date(), (tli1.work_completed + timezone.timedelta(days=1)).date())
# now mark ti2 as valid
ti2.status = self.valid_status
ti2.save()
self.utc_hist.set_due_date()
# due date should now be based on tli2 since it is valid
self.assertEqual(self.utc_hist.due_date.date(), (tli2.work_completed + timezone.timedelta(days=1)).date())
def test_due_date_not_updated_for_in_progress(self):
        # test case where a valid instance is followed by an in-progress instance;
        # the due date should not be updated by the in-progress instance
# first create valid history
now = timezone.now()
tli1 = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli1, unit_test_info=self.uti_hist, status=self.valid_status)
tli1.save()
# now create 2nd in progress history
now = timezone.now()
tli2 = utils.create_test_list_instance(
unit_test_collection=self.utc_hist, work_completed=now + timezone.timedelta(days=1)
)
utils.create_test_instance(tli2, unit_test_info=self.uti_hist, status=self.valid_status)
tli2.in_progress = True
tli2.save()
self.utc_hist.refresh_from_db()
self.assertEqual(self.utc_hist.due_date.date(), (tli1.work_completed + timezone.timedelta(days=1)).date())
def test_due_date_not_updated_for_unscheduled(self):
# first create valid history
now = timezone.now()
tli1 = utils.create_test_list_instance(unit_test_collection=self.utc_hist, work_completed=now)
utils.create_test_instance(tli1, unit_test_info=self.uti_hist, status=self.valid_status)
tli1.save()
# now create 2nd unscheduled
now = timezone.now()
tli2 = utils.create_test_list_instance(
unit_test_collection=self.utc_hist, work_completed=now + timezone.timedelta(days=1)
)
utils.create_test_instance(tli2, unit_test_info=self.uti_hist, status=self.valid_status)
tli2.include_for_scheduling = False
tli2.save()
self.utc_hist.refresh_from_db()
self.assertEqual(self.utc_hist.due_date.date(), (tli1.work_completed + timezone.timedelta(days=1)).date())
def test_cycle_due_date(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
cycle = utils.create_cycle(test_lists=test_lists)
daily = utils.create_frequency(interval=1, window_end=0)
status = utils.create_status()
utc = utils.create_unit_test_collection(test_collection=cycle, frequency=daily, unit=self.utc_hist.unit)
now = timezone.now()
day, tl = utc.next_list()
uti = models.UnitTestInfo.objects.get(test=tl.all_tests()[0], unit=utc.unit)
tli = utils.create_test_list_instance(unit_test_collection=utc, work_completed=now)
utils.create_test_instance(tli, unit_test_info=uti, work_completed=now, status=status)
tli.save()
utc.refresh_from_db()
assert utc.due_date.date() == (now + timezone.timedelta(days=1)).date()
uti = models.UnitTestInfo.objects.get(test=test_lists[1].tests.all()[0], unit=utc.unit)
utils.create_test_instance(tli, unit_test_info=uti, work_completed=now, status=status)
utc.refresh_from_db()
assert utc.due_date.date() == (now + timezone.timedelta(days=1)).date()
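# --- Illustrative sketch, not part of the original test module ---
# The due-date tests above exercise a simple scheduling rule: the next due date
# is the work-completed time of the most recent *valid* test list instance plus
# the frequency interval, while invalid, in-progress, and unscheduled instances
# are ignored. The helper below is only a sketch of that rule with hypothetical
# names; QATrack+'s real logic lives in qatrack.qatrack_core.scheduling.
from datetime import timedelta
def sketch_due_date(completions, interval_days):
    """Return the latest valid completion plus the interval, or None without valid history."""
    valid = [
        c["work_completed"]
        for c in completions
        if c.get("valid", True) and not c.get("in_progress") and c.get("include_for_scheduling", True)
    ]
    if not valid:
        return None
    return max(valid) + timedelta(days=interval_days)
# Example: an invalid follow-up does not push the due date forward, e.g.
#   sketch_due_date([{"work_completed": d1, "valid": True},
#                    {"work_completed": d2, "valid": False}], 1) == d1 + timedelta(days=1)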
class TestUnitTestCollection(TestCase):
def test_manager_by_unit(self):
utc = utils.create_unit_test_collection()
self.assertListEqual(list(models.UnitTestCollection.objects.by_unit(utc.unit)), [utc])
def test_manager_by_frequency(self):
utc = utils.create_unit_test_collection()
self.assertListEqual(list(models.UnitTestCollection.objects.by_frequency(utc.frequency)), [utc])
def test_manager_by_unit_frequency(self):
utc = utils.create_unit_test_collection()
self.assertListEqual(list(models.UnitTestCollection.objects.by_unit_frequency(utc.unit, utc.frequency)), [utc])
def test_manager_test_lists(self):
utc = utils.create_unit_test_collection()
self.assertListEqual(list(models.UnitTestCollection.objects.test_lists()), [utc])
def test_adhoc_due_status(self):
now = timezone.now()
utc = utils.create_unit_test_collection(frequency=None, null_frequency=True)
self.assertEqual(scheduling.NO_DUE_DATE, utc.due_status())
utc.set_due_date(now - timezone.timedelta(days=1))
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.due_status(), scheduling.OVERDUE)
def test_daily_due_status(self):
now = timezone.now()
daily = utils.create_frequency(interval=1, window_end=0)
utc = utils.create_unit_test_collection(frequency=daily)
self.assertEqual(scheduling.NO_DUE_DATE, utc.due_status())
daily_statuses = (
(-2, scheduling.OVERDUE),
(-1, scheduling.OVERDUE),
(0, scheduling.NOT_DUE),
(1, scheduling.NOT_DUE),
)
for delta, due_status in daily_statuses:
wc = now + timezone.timedelta(days=delta)
utils.create_test_list_instance(unit_test_collection=utc, work_completed=wc)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.due_status(), due_status)
def test_weekly_due_status(self):
now = timezone.now()
weekly = utils.create_frequency(interval=7, window_end=2)
utc = utils.create_unit_test_collection(frequency=weekly)
self.assertEqual(scheduling.NO_DUE_DATE, utc.due_status())
weekly_statuses = ((-10, scheduling.OVERDUE),
(-8, scheduling.DUE),
(-7, scheduling.DUE),
(-6, scheduling.NOT_DUE),
(1, scheduling.NOT_DUE))
for delta, due_status in weekly_statuses:
wc = now + timezone.timedelta(days=delta)
utils.create_test_list_instance(unit_test_collection=utc, work_completed=wc)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.due_status(), due_status)
@mock.patch('django.utils.timezone.now', mock.Mock(side_effect=utc_2am))
def test_date_straddle_due_status(self):
"""
Ensure that due_status is correct when test list is due tomorrow
and current local time + UTC offset crosses midnight.
e.g. the situation where:
utc.due_date == 2 April 2014 14:00 (UTC)
timezone.localtime(timezone.now()).date() == 1 April 2014
but
timezone.now().date() == 2 April 2014
"""
with timezone.override("America/Toronto"):
weekly = utils.create_frequency(interval=7, window_end=2)
utc = utils.create_unit_test_collection(frequency=weekly)
utc.set_due_date(utc_2am() + timezone.timedelta(hours=12))
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.due_status(), scheduling.NOT_DUE)
def test_set_due_date(self):
due_date = timezone.now() + timezone.timedelta(days=1)
utc = utils.create_unit_test_collection()
utc.set_due_date(due_date)
self.assertEqual(utc.due_date, due_date)
def test_set_due_date_none(self):
now = timezone.now()
utc = utils.create_unit_test_collection()
utils.create_test_list_instance(unit_test_collection=utc, work_completed=now)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
utc.set_due_date(due_date=None)
due = now + timezone.timedelta(days=1)
assert utc.due_date.date() == due.date()
def test_last_done_date(self):
now = timezone.now()
utc = utils.create_unit_test_collection()
self.assertFalse(utc.unreviewed_instances())
tli = utils.create_test_list_instance(unit_test_collection=utc, work_completed=now)
test = utils.create_test(name="tester")
utils.create_test_list_membership(tli.test_list, test)
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
utils.create_test_instance(tli, unit_test_info=uti, work_completed=now)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertTrue(utils.datetimes_same(now, utc.last_done_date()))
def test_unreviewed_instances(self):
utc = utils.create_unit_test_collection()
self.assertFalse(utc.unreviewed_instances())
tli = utils.create_test_list_instance(unit_test_collection=utc)
test = utils.create_test(name="tester")
utils.create_test_list_membership(tli.test_list, test)
# uti = utils.create_unit_test_info(test=test, unit=utc.unit, frequency=utc.frequency)
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
utils.create_test_instance(tli, unit_test_info=uti)
self.assertEqual([tli], list(utc.unreviewed_instances()))
def test_last_completed_instance(self):
utc = utils.create_unit_test_collection()
self.assertFalse(utc.unreviewed_instances())
test = utils.create_test(name="tester")
utils.create_test_list_membership(utc.tests_object, test)
self.assertIsNone(utc.last_instance)
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
tli = utils.create_test_list_instance(unit_test_collection=utc)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
utils.create_test_instance(tli, unit_test_info=uti)
self.assertEqual(tli, utc.last_instance)
def test_unreview_test_instances(self):
utc = utils.create_unit_test_collection()
self.assertFalse(utc.unreviewed_instances())
test = utils.create_test(name="tester")
utils.create_test_list_membership(utc.tests_object, test)
self.assertIsNone(utc.last_instance)
tli = utils.create_test_list_instance(unit_test_collection=utc)
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
ti = utils.create_test_instance(tli, unit_test_info=uti)
self.assertEqual([ti], list(utc.unreviewed_test_instances()))
def test_history(self):
td = timezone.timedelta
now = timezone.now()
utc = utils.create_unit_test_collection()
test = utils.create_test(name="tester")
utils.create_test_list_membership(utc.tests_object, test)
uti = models.UnitTestInfo.objects.latest("pk")
status = utils.create_status()
        # values purposely created out of order to make sure history
# returns in correct order (i.e. ordered by date)
history = [
now - td(days=4),
now - td(days=1),
now - td(days=3),
now - td(days=2),
]
tlis = []
tis = []
for wc in history:
tli = utils.create_test_list_instance(unit_test_collection=utc, work_completed=wc)
ti = utils.create_test_instance(tli, unit_test_info=uti, work_completed=wc, status=status)
tis.append(ti)
tlis.append(tli)
tlis.sort(key=lambda x: x.work_completed, reverse=True)
tis.sort(key=lambda x: x.work_completed, reverse=True)
sorted_hist = list(reversed(sorted([h.replace(second=0, microsecond=0) for h in history])))
test_hist, dates = utc.history(before=now)
dates = [d.replace(second=0, microsecond=0) for (tli_url, d) in dates]
wcs = [x.work_completed.replace(second=0, microsecond=0) for x in tlis]
self.assertEqual(sorted_hist, dates)
self.assertEqual(sorted_hist, wcs)
# test returns correct number of results
self.assertEqual([(test, list(zip(tlis, tis)))], test_hist)
def test_test_list_next_list(self):
utc = utils.create_unit_test_collection()
self.assertEqual(utc.next_list(), (0, utc.tests_object))
utils.create_test_list_instance(unit_test_collection=utc)
self.assertEqual(utc.next_list(), (0, utc.tests_object))
def test_cycle_next_list_empty(self):
cycle = utils.create_cycle()
utc = utils.create_unit_test_collection(test_collection=cycle)
self.assertEqual(utc.next_list(), (None, None))
def test_cycle_next_list(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
cycle = utils.create_cycle(test_lists=test_lists)
utc = utils.create_unit_test_collection(test_collection=cycle)
self.assertEqual(utc.next_list(), (0, test_lists[0]))
tli = utils.create_test_list_instance(unit_test_collection=utc, test_list=test_lists[0])
        # need to regrab from db since last_instance was updated in the db
# by signal handler
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.next_list(), (1, test_lists[1]))
work_completed = tli.work_completed + timezone.timedelta(hours=1)
utils.create_test_list_instance(
unit_test_collection=utc, test_list=test_lists[1], day=1, work_completed=work_completed
)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.next_list(), (0, test_lists[0]))
def test_cycle_next_list_with_repeats(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
test_lists = test_lists + test_lists
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
cycle = utils.create_cycle(test_lists=test_lists)
utc = utils.create_unit_test_collection(test_collection=cycle)
self.assertEqual(utc.next_list(), (0, test_lists[0]))
tli = utils.create_test_list_instance(unit_test_collection=utc, test_list=test_lists[0], day=2)
        # need to regrab from db since last_instance was updated in the db
# by signal handler
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.next_list(), (3, test_lists[3]))
work_completed = tli.work_completed + timezone.timedelta(hours=1)
utils.create_test_list_instance(
unit_test_collection=utc, test_list=test_lists[3], day=3, work_completed=work_completed
)
utc = models.UnitTestCollection.objects.get(pk=utc.pk)
self.assertEqual(utc.next_list(), (0, test_lists[0]))
def test_cycle_get_list(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
cycle = utils.create_cycle(test_lists=test_lists)
utc = utils.create_unit_test_collection(test_collection=cycle)
for i, test_list in enumerate(test_lists):
self.assertEqual(utc.get_list(i), (i, test_list))
self.assertEqual(utc.get_list(), (0, test_lists[0]))
def test_cycle_delete_day(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
cycle = utils.create_cycle(test_lists=test_lists)
utc = utils.create_unit_test_collection(test_collection=cycle)
self.assertEqual(utc.next_list(), (0, test_lists[0]))
tli = utils.create_test_list_instance(unit_test_collection=utc, test_list=test_lists[0])
membership = cycle.testlistcyclemembership_set.get(test_list=tli.test_list)
membership.delete()
cycle.testlistcyclemembership_set.filter(test_list=test_lists[1]).update(order=0)
self.assertEqual(cycle.next_list(tli.day), (0, cycle.first()))
def test_name(self):
tl = utils.create_test_list("tl1")
utc = utils.create_unit_test_collection(test_collection=tl)
self.assertEqual(utc.name, str(utc))
self.assertEqual(tl.name, utc.name)
class TestSignals(TestCase):
def test_list_assigned_to_unit(self):
test = utils.create_test(name="test")
test_list = utils.create_test_list()
utils.create_test_list_membership(test_list, test)
utc = utils.create_unit_test_collection(test_collection=test_list)
utis = list(models.UnitTestInfo.objects.all())
# test list on its own
self.assertEqual(len(utis), 1)
self.assertListEqual([utc.unit, test], [utis[0].unit, utis[0].test])
        # test that utis are created for sublists
sub_test = utils.create_test(name="sub")
sub_list = utils.create_test_list(name="sublist")
utils.create_test_list_membership(sub_list, sub_test)
models.Sublist.objects.create(parent=test_list, child=sub_list, order=0)
utis = list(models.UnitTestInfo.objects.all())
self.assertEqual(len(utis), 2)
self.assertListEqual([utc.unit, sub_test], [utis[1].unit, utis[1].test])
def test_sublist_changed(self):
test = utils.create_test(name="test")
test_list = utils.create_test_list()
utils.create_test_list_membership(test_list, test)
utc = utils.create_unit_test_collection(test_collection=test_list)
        # test that utis are created for sublists
sub_test = utils.create_test(name="sub")
sub_list = utils.create_test_list(name="sublist")
utils.create_test_list_membership(sub_list, sub_test)
models.Sublist.objects.create(parent=test_list, child=sub_list, order=0)
utis = list(models.UnitTestInfo.objects.all())
self.assertEqual(len(utis), 2)
self.assertListEqual([utc.unit, sub_test], [utis[1].unit, utis[1].test])
sub_test2 = utils.create_test(name="sub2")
utils.create_test_list_membership(sub_list, sub_test2)
utis = list(models.UnitTestInfo.objects.all())
self.assertEqual(len(utis), 3)
def test_test_cycle_changed(self):
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(4)]
tests = []
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
tests.append(test)
cycle1 = utils.create_cycle(test_lists=test_lists[:2])
cycle2 = utils.create_cycle(name="cycle2", test_lists=test_lists[2:])
utc = utils.create_unit_test_collection(test_collection=cycle1)
# change test collection
utc.tests_object = cycle2
utc.save()
utis = list(models.UnitTestInfo.objects.order_by("test_id"))
        # utis should now exist for all four tests
self.assertEqual(len(utis), 4)
self.assertListEqual(tests, [x.test for x in utis])
def test_sublist_in_cycle_changed(self):
        # create 2 test lists
test_lists = [utils.create_test_list(name="test list %d" % i) for i in range(2)]
for i, test_list in enumerate(test_lists):
test = utils.create_test(name="test %d" % i)
utils.create_test_list_membership(test_list, test)
# create another test list and add it to the first test list
sub_test = utils.create_test(name="sub")
sub_list = utils.create_test_list(name="sublist")
utils.create_test_list_membership(sub_list, sub_test)
models.Sublist.objects.create(parent=test_lists[0], child=sub_list, order=0)
cycle1 = utils.create_cycle(test_lists=test_lists)
utils.create_unit_test_collection(test_collection=cycle1)
utis = list(models.UnitTestInfo.objects.order_by("test_id"))
# should be 3 unit test infos
assert len(utis) == 3
# now add a new test to the sublist
sub_test2 = utils.create_test(name="sub2")
utils.create_test_list_membership(sub_list, sub_test2)
# should now be 4 utis
utis = list(models.UnitTestInfo.objects.order_by("test_id"))
assert len(utis) == 4
class TestTestInstance(TestCase):
def setUp(self):
self.test = utils.create_test()
self.test_list = utils.create_test_list()
utils.create_test_list_membership(test=self.test, test_list=self.test_list)
self.utc = utils.create_unit_test_collection(test_collection=self.test_list)
self.unit = self.utc.unit
self.uti = models.UnitTestInfo.objects.get(test=self.test, unit=self.utc.unit)
self.tli = utils.create_test_list_instance(unit_test_collection=self.utc)
def test_save(self):
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti)
ti.pass_fail = None
self.assertIsNone(ti.pass_fail)
ti.save()
self.assertIsNotNone(ti.pass_fail)
def test_diff(self):
ref = utils.create_reference(value=1)
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti, value=1)
ti.reference = ref
self.assertEqual(0, ti.difference())
def test_diff_wrap_high_inside(self):
ref = utils.create_reference(value=0)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=100, wrap_low=0)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=99)
ti.reference = ref
assert -1 == ti.difference_wraparound()
def test_diff_wrap_high_outside(self):
ref = utils.create_reference(value=0)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=100, wrap_low=-100)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=101)
ti.reference = ref
assert 101 == ti.difference_wraparound()
def test_diff_wrap_low_inside(self):
ref = utils.create_reference(value=0)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=100, wrap_low=-100)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=-99)
ti.reference = ref
assert -99 == ti.difference_wraparound()
def test_diff_wrap_low_outside(self):
ref = utils.create_reference(value=0)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=100, wrap_low=-100)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=-101)
ti.reference = ref
assert -101 == ti.difference_wraparound()
def test_diff_wrap_mid(self):
ref = utils.create_reference(value=0)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=100, wrap_low=-100)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=0)
ti.reference = ref
assert 0 == ti.difference_wraparound()
def test_diff_wrap_mid_not_sym(self):
ref = utils.create_reference(value=5)
test = utils.create_test(test_type=models.WRAPAROUND, wrap_high=10, wrap_low=2)
uti = utils.create_unit_test_info(unit=self.unit, test=test, ref=ref)
ti = utils.create_test_instance(self.tli, unit_test_info=uti, value=6)
ti.reference = ref
assert 1 == ti.difference_wraparound()
def test_diff_unavailable(self):
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti, value=1)
self.assertIsNone(ti.calculate_diff())
def test_percent_diff(self):
ref = utils.create_reference(value=1)
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti, value=1.1)
ti.reference = ref
self.assertAlmostEqual(10, ti.percent_difference())
ref.value = 0
self.assertRaises(ZeroDivisionError, ti.percent_difference)
def test_bool_pass_fail(self):
test = utils.create_test(test_type=models.BOOLEAN)
uti = models.UnitTestInfo(test=test)
yes_ref = models.Reference(
type=models.BOOLEAN,
value=True,
)
no_ref = models.Reference(
type=models.BOOLEAN,
value=False,
)
yes_instance = models.TestInstance(value=1, unit_test_info=uti)
no_instance = models.TestInstance(value=0, unit_test_info=uti)
ok_tests = (
(yes_instance, yes_ref),
(no_instance, no_ref),
)
action_tests = (
(no_instance, yes_ref),
(yes_instance, no_ref),
)
for i, ref in ok_tests:
i.reference = ref
i.calculate_pass_fail()
self.assertEqual(models.OK, i.pass_fail)
for i, ref in action_tests:
i.reference = ref
i.calculate_pass_fail()
self.assertEqual(models.ACTION, i.pass_fail)
def test_mult_pass_fail(self):
test = models.Test(type=models.MULTIPLE_CHOICE, choices="a,b,c,d,e")
t = models.Tolerance(type=models.MULTIPLE_CHOICE, mc_pass_choices="a,b", mc_tol_choices="c,d")
uti = models.UnitTestInfo(test=test, tolerance=t)
instance = models.TestInstance(test_list_instance=self.tli, unit_test_info=uti, tolerance=t)
for c in ("a", "b"):
instance.string_value = c
instance.calculate_pass_fail()
self.assertEqual(instance.pass_fail, models.OK)
for c in ("c", "d"):
instance.string_value = c
instance.calculate_pass_fail()
self.assertEqual(instance.pass_fail, models.TOLERANCE)
for c in ("e",):
instance.string_value = c
instance.calculate_pass_fail()
self.assertEqual(instance.pass_fail, models.ACTION)
def test_absolute_pass_fail(self):
test = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=test)
ti = models.TestInstance(unit_test_info=uti)
ref = models.Reference(type=models.NUMERICAL, value=100.)
ti.reference = ref
tol = models.Tolerance(
type=models.ABSOLUTE,
act_low=-3,
tol_low=-2,
tol_high=2,
act_high=3,
)
ti.tolerance = tol
tests = (
(models.ACTION, 96),
(models.ACTION, -100),
(models.ACTION, 1E99),
(models.ACTION, 103.1),
(models.TOLERANCE, 97),
(models.TOLERANCE, 97.5),
(models.TOLERANCE, 102.1),
(models.TOLERANCE, 103),
(models.OK, 100),
(models.OK, 102),
(models.OK, 98),
)
for result, val in tests:
ti.value = val
ti.calculate_pass_fail()
self.assertEqual(result, ti.pass_fail)
def test_absolute_no_action(self):
test = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=test)
ti = models.TestInstance(unit_test_info=uti)
ref = models.Reference(type=models.NUMERICAL, value=100.)
ti.reference = ref
tol = models.Tolerance(
type=models.ABSOLUTE,
tol_low=-2,
tol_high=2,
)
ti.tolerance = tol
tests = (
(models.TOLERANCE, 97),
(models.TOLERANCE, 102.1),
(models.OK, 100),
(models.OK, 102),
(models.OK, 98),
)
for result, val in tests:
ti.value = val
ti.calculate_pass_fail()
self.assertEqual(result, ti.pass_fail)
def test_edge_pass_fail(self):
test = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=test)
ti = models.TestInstance(unit_test_info=uti)
ref = models.Reference(type=models.NUMERICAL, value=5.)
ti.reference = ref
tol = models.Tolerance(
type=models.ABSOLUTE,
act_low=-0.2,
tol_low=-0.1,
tol_high=0.1,
act_high=0.2,
)
ti.tolerance = tol
tests = (
(models.ACTION, 4.79999),
(models.TOLERANCE, 4.799999999999999999),
(models.TOLERANCE, 4.8),
(models.TOLERANCE, 4.89999),
(models.OK, 4.899999999999999999),
(models.OK, 4.9),
(models.OK, 5.1),
(models.OK, 5.10000000000000000000001),
(models.TOLERANCE, 5.10001),
(models.TOLERANCE, 5.2),
(models.TOLERANCE, 5.20000000000000000000001),
(models.ACTION, 5.20001),
)
for result, val in tests:
ti.value = val
ti.calculate_pass_fail()
self.assertEqual(result, ti.pass_fail)
def test_percent_pass_fail(self):
test = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=test)
ti = models.TestInstance(unit_test_info=uti)
ti.reference = models.Reference(type=models.NUMERICAL, value=100.)
ti.tolerance = models.Tolerance(
type=models.PERCENT,
act_low=-3,
tol_low=-2,
tol_high=2,
act_high=3,
)
tests = (
(models.ACTION, 96),
(models.ACTION, -100),
(models.ACTION, 1E99),
(models.ACTION, 103.1),
(models.TOLERANCE, 97),
(models.TOLERANCE, 97.5),
(models.TOLERANCE, 102.1),
(models.TOLERANCE, 103),
(models.OK, 100),
(models.OK, 102),
(models.OK, 98),
)
for result, val in tests:
ti.value = val
ti.calculate_pass_fail()
self.assertEqual(result, ti.pass_fail)
def test_skipped(self):
ti = models.TestInstance(skipped=True)
ti.unit_test_info = models.UnitTestInfo()
ti.unit_test_info.test = models.Test(hidden=False)
ti.calculate_pass_fail()
self.assertEqual(models.NOT_DONE, ti.pass_fail)
def test_in_progress(self):
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti)
ti.test_list_instance.in_progress = True
ti.test_list_instance.save()
self.assertEqual(models.TestInstance.objects.in_progress()[0], ti)
def test_upload_link_none(self):
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti)
self.assertEqual(ti.upload_link(), None)
def test_image_url_none(self):
ti = utils.create_test_instance(self.tli, unit_test_info=self.uti)
self.assertEqual(ti.image_url(), None)
def test_upload_value_display(self):
t = utils.create_test(test_type=models.UPLOAD)
uti = utils.create_unit_test_info(test=t, unit=self.utc.unit, assigned_to=models.Group.objects.latest("id"))
tli = utils.create_test_list_instance(unit_test_collection=self.utc)
ti = utils.create_test_instance(tli, unit_test_info=uti)
# no actual attachment so value display will be None
self.assertEqual(None, ti.value_display())
def test_string_value_display(self):
t = models.Test(type=models.STRING)
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti)
ti.string_value = "test"
self.assertEqual("test", ti.value_display())
def test_bool_display_value(self):
t = models.Test(type=models.BOOLEAN)
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti, value=1)
self.assertEqual("Yes", ti.value_display())
ti = models.TestInstance(unit_test_info=uti, value=0)
self.assertEqual("No", ti.value_display())
def test_mc_display_value(self):
t = models.Test(type=models.MULTIPLE_CHOICE, choices="a,b,c")
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti, string_value="c")
self.assertEqual("c", ti.value_display())
def test_invalid_display_value(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti, string_value="Invalid")
self.assertEqual("Invalid", ti.value_display())
def test_reg_display_value(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti, value=0)
self.assertEqual("0", ti.value_display())
ti.skipped = True
self.assertEqual("Skipped", ti.value_display())
ti.skipped = False
ti.value = None
self.assertEqual("Not Done", ti.value_display())
def test_diff_display_no_value(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
ti = models.TestInstance(unit_test_info=uti, value=0)
self.assertEqual("", ti.diff_display())
def test_diff_display_absolute(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
tol = models.Tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, type=models.ABSOLUTE)
ref = models.Reference(type=models.NUMERICAL, value=100.)
ti = models.TestInstance(unit_test_info=uti, value=0, reference=ref, tolerance=tol)
self.assertEqual("-100", ti.diff_display())
def test_diff_display_percent(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
tol = models.Tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, type=models.PERCENT)
ref = models.Reference(type=models.NUMERICAL, value=1.)
ti = models.TestInstance(unit_test_info=uti, value=0.995, reference=ref, tolerance=tol)
self.assertEqual("-0.5%", ti.diff_display())
def test_diff_zero_div(self):
t = models.Test(type=models.SIMPLE)
uti = models.UnitTestInfo(test=t)
tol = models.Tolerance(act_high=2, act_low=-2, tol_high=1, tol_low=-1, type=models.PERCENT)
ref = models.Reference(type=models.NUMERICAL, value=0.)
display = "Zero ref with % diff tol"
ti = models.TestInstance(unit_test_info=uti, value=0.995, reference=ref, tolerance=tol)
self.assertEqual(display, ti.diff_display())
class TestTestListInstance(TestCase):
def setUp(self):
self.tests = []
self.ref = models.Reference(type=models.NUMERICAL, value=100.)
self.tol = models.Tolerance(type=models.PERCENT, act_low=-3, tol_low=-2, tol_high=2, act_high=3)
self.ref.created_by = utils.create_user()
self.tol.created_by = utils.create_user()
self.ref.modified_by = utils.create_user()
self.tol.modified_by = utils.create_user()
self.values = [None, None, 96, 97, 100, 100]
self.statuses = [utils.create_status(name="status%d" % x, slug="status%d" % x) for x in range(len(self.values))]
self.test_list = utils.create_test_list()
for i in range(6):
test = utils.create_test(name="name%d" % i)
self.tests.append(test)
utils.create_test_list_membership(self.test_list, test)
self.unit_test_collection = utils.create_unit_test_collection(test_collection=self.test_list)
self.test_list_instance = self.create_test_list_instance()
def create_test_list_instance(self, work_completed=None):
utc = self.unit_test_collection
tli = utils.create_test_list_instance(unit_test_collection=utc, work_completed=work_completed)
for i, (v, test, status) in enumerate(zip(self.values, self.tests, self.statuses)):
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
ti = utils.create_test_instance(tli, unit_test_info=uti, value=v, status=status)
ti.reference = self.ref
ti.tolerance = self.tol
ti.test_list_instance = tli
if i == 0:
ti.skipped = True
if i == 1:
ti.tolerance = None
ti.reference = None
else:
ti.reference.save()
ti.tolerance.save()
ti.save()
tli.save()
return tli
def test_pass_fail(self):
pf_status = self.test_list_instance.pass_fail_status()
for pass_fail, _, tests in pf_status:
if pass_fail == models.OK:
self.assertTrue(len(tests) == 2)
else:
self.assertTrue(len(tests) == 1)
def test_review_status(self):
for stat, tests in self.test_list_instance.status():
self.assertEqual(len(tests), 1)
def test_unreviewed_instances(self):
self.assertSetEqual(set(self.test_list_instance.unreviewed_instances()), set(models.TestInstance.objects.all()))
def test_tolerance_tests(self):
self.assertEqual(1, self.test_list_instance.tolerance_tests().count())
def test_failing_tests(self):
self.assertEqual(1, self.test_list_instance.tolerance_tests().count())
def test_in_progress(self):
self.test_list_instance.in_progress = True
self.test_list_instance.save()
self.assertEqual(models.TestListInstance.objects.in_progress()[0], self.test_list_instance)
def test_deleted_signal_tis_deleted(self):
self.test_list_instance.delete()
self.assertEqual(models.TestInstance.objects.count(), 0)
def test_deleted_signal_last_instance_updated(self):
tli = self.create_test_list_instance()
self.unit_test_collection = models.UnitTestCollection.objects.get(pk=self.unit_test_collection.pk)
self.assertEqual(self.unit_test_collection.last_instance, tli)
tli.delete()
self.unit_test_collection = models.UnitTestCollection.objects.get(pk=self.unit_test_collection.pk)
self.assertEqual(self.unit_test_collection.last_instance, self.test_list_instance)
self.test_list_instance.delete()
self.unit_test_collection = models.UnitTestCollection.objects.get(pk=self.unit_test_collection.pk)
self.assertEqual(self.unit_test_collection.last_instance, None)
def test_input_later(self):
dt = timezone.now() + timezone.timedelta(seconds=1)
tli = self.create_test_list_instance(work_completed=dt)
utc = models.UnitTestCollection.objects.get(pk=self.unit_test_collection.pk)
self.assertEqual(utc.last_instance, tli)
tli.work_completed = dt - timezone.timedelta(days=1)
tli.save()
utc = models.UnitTestCollection.objects.get(pk=self.unit_test_collection.pk)
self.assertEqual(utc.last_instance, self.test_list_instance)
class TestAutoReview(TestCase):
def setUp(self):
self.tests = []
self.ref = models.Reference(type=models.NUMERICAL, value=100.)
self.tol = models.Tolerance(type=models.PERCENT, act_low=-3, tol_low=-2, tol_high=2, act_high=3)
self.ref.created_by = utils.create_user()
self.tol.created_by = utils.create_user()
self.ref.modified_by = utils.create_user()
self.tol.modified_by = utils.create_user()
self.values = [96, 97, 100]
self.statuses = [
utils.create_status(name="default", slug="default", requires_review=True, is_default=True),
utils.create_status(name="pass", slug="pass", requires_review=False, is_default=False),
utils.create_status(name="tol", slug="tol", requires_review=False, is_default=False),
utils.create_status(name="fail", slug="fail", requires_review=False, is_default=False),
]
models.AutoReviewRule.objects.bulk_create([
models.AutoReviewRule(pass_fail=models.OK, status=self.statuses[1]),
models.AutoReviewRule(pass_fail=models.TOLERANCE, status=self.statuses[2]),
])
self.ruleset = models.AutoReviewRuleSet.objects.create(name="default", is_default=True)
for rule in models.AutoReviewRule.objects.all():
self.ruleset.rules.add(rule)
self.test_list = utils.create_test_list()
for i in range(3):
test = utils.create_test(name="name%d" % i)
test.autoreviewruleset_id = self.ruleset.id
test.save()
self.tests.append(test)
utils.create_test_list_membership(self.test_list, test)
self.unit_test_collection = utils.create_unit_test_collection(test_collection=self.test_list)
self.test_list_instance = self.create_test_list_instance()
def create_test_list_instance(self):
utc = self.unit_test_collection
tli = utils.create_test_list_instance(unit_test_collection=utc)
self.ref.save()
self.tol.save()
for i, (v, test) in enumerate(zip(self.values, self.tests)):
uti = models.UnitTestInfo.objects.get(test=test, unit=utc.unit)
ti = utils.create_test_instance(tli, unit_test_info=uti, value=v, status=self.statuses[0])
ti.reference = self.ref
ti.tolerance = self.tol
ti.test_list_instance = tli
ti.calculate_pass_fail()
ti.auto_review()
ti.save()
tli.save()
return tli
def test_review_status(self):
"""Each of the three tests should have a different status"""
for stat, tests in self.test_list_instance.status():
self.assertEqual(len(tests), 1)
def test_review_status_with_comment(self):
"""Each of the three tests should have a different status"""
uti = models.UnitTestInfo.objects.get(test=self.tests[0], unit=self.unit_test_collection.unit)
ti = utils.create_test_instance(
self.test_list_instance, unit_test_info=uti, value=self.ref.value, status=self.statuses[0]
)
ti.reference = self.ref
ti.tolerance = self.tol
ti.comment = "comment"
ti.calculate_pass_fail()
ti.auto_review()
ti.save()
self.assertTrue(ti.status.requires_review)
def test_review_status_with_tli_comment(self):
"""Each of the three tests should have a different status"""
uti = models.UnitTestInfo.objects.get(test=self.tests[0], unit=self.unit_test_collection.unit)
ti = utils.create_test_instance(
self.test_list_instance, unit_test_info=uti, value=self.ref.value, status=self.statuses[0]
)
ti.reference = self.ref
ti.tolerance = self.tol
Comment.objects.create(comment="comment", content_object=self.test_list_instance, site_id=1)
# self.test_list_instance.comments.add(c)
# self.test_list_instance.save()
ti.calculate_pass_fail()
ti.auto_review()
ti.save()
self.assertTrue(ti.status.requires_review)
def test_review_status_skipped_hidden(self):
"""Skipped hidden tests should not block auto review"""
uti = models.UnitTestInfo.objects.get(test=self.tests[0], unit=self.unit_test_collection.unit)
uti.test.hidden = True
uti.test.save()
ti = utils.create_test_instance(
self.test_list_instance, unit_test_info=uti, value=self.ref.value, status=self.statuses[0]
)
ti.skipped = True
ti.reference = self.ref
ti.tolerance = self.tol
ti.calculate_pass_fail()
ti.auto_review()
ti.save()
assert not ti.status.requires_review
def test_autoreviewruleset_cache_missing_id(self):
"""Rule is missing, so cache should be refreshed"""
cache.set(settings.CACHE_AUTOREVIEW_RULESETS, {})
r = models.AutoReviewRuleSet.objects.first()
cached = models.autoreviewruleset_cache(r.id)
assert 'ok' in cached and 'tolerance' in cached
def test_arr_str(self):
r = models.AutoReviewRule.objects.get(pass_fail="ok")
assert str(r) == "OK => pass"
def test_arrset_str(self):
assert str(self.ruleset) == "default"
| 39
| 120
| 0.653256
|
9de022545835a99e689a01048bed4f07c359d773
| 784
|
py
|
Python
|
src/python/pants/backend/python/lint/yapf/skip_field.py
|
hephex/pants
|
5798377b2876dc1d5355e407972a441ea7af06d7
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/lint/yapf/skip_field.py
|
hephex/pants
|
5798377b2876dc1d5355e407972a441ea7af06d7
|
[
"Apache-2.0"
] | 12
|
2022-01-06T23:20:22.000Z
|
2022-03-17T05:06:37.000Z
|
src/python/pants/backend/python/lint/yapf/skip_field.py
|
hephex/pants
|
5798377b2876dc1d5355e407972a441ea7af06d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
)
from pants.engine.target import BoolField
class SkipYapfField(BoolField):
alias = "skip_yapf"
default = False
help = "If true, don't run yapf on this target's code."
def rules():
return [
PythonSourceTarget.register_plugin_field(SkipYapfField),
PythonSourcesGeneratorTarget.register_plugin_field(SkipYapfField),
PythonTestTarget.register_plugin_field(SkipYapfField),
PythonTestsGeneratorTarget.register_plugin_field(SkipYapfField),
]
| 30.153846
| 74
| 0.758929
|
26b4619649b07e0b1e3cfc05ca3b731e540cf69e
| 7,087
|
py
|
Python
|
tools/codegen/gen_vmap_plumbing.py
|
bmahlbrand/functorch
|
b83273b25213f556f05a065163163ba531e24750
|
[
"BSD-3-Clause"
] | null | null | null |
tools/codegen/gen_vmap_plumbing.py
|
bmahlbrand/functorch
|
b83273b25213f556f05a065163163ba531e24750
|
[
"BSD-3-Clause"
] | null | null | null |
tools/codegen/gen_vmap_plumbing.py
|
bmahlbrand/functorch
|
b83273b25213f556f05a065163163ba531e24750
|
[
"BSD-3-Clause"
] | null | null | null |
from tools.codegen.api.types import (
DispatcherSignature,
)
from tools.codegen.model import (
BaseTy, Variant, OptionalType, BaseType, ListType, NativeFunction, Type,
Argument, Return, SchemaKind, Tag
)
from tools.codegen.context import method_with_native_function
from tools.codegen.utils import mapMaybe
from dataclasses import dataclass
from typing import List, Dict, Optional, Tuple, Set, Any, Union, Sequence, TypeVar
import textwrap
def is_tensor(typ: Type) -> bool:
return isinstance(typ, BaseType) and typ.name == BaseTy.Tensor
def is_optional_tensor(typ: Type) -> bool:
return isinstance(typ, OptionalType) and is_tensor(typ.elem)
def is_vector_tensor(typ: Type) -> bool:
return isinstance(typ, ListType) and is_tensor(typ.elem)
def unwrap_tensor(name: str) -> List[str]:
result = f"""\
Tensor {name}_value;
optional<int64_t> {name}_bdim;
std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}, cur_level);"""
return textwrap.dedent(result).split('\n')
def unwrap_optional_tensor(name: str) -> List[str]:
result = f"""\
optional<Tensor> {name}_value;
optional<int64_t> {name}_bdim;
if ({name}) {{
std::tie({name}_value, {name}_bdim) = unwrapTensorAtLevel({name}.value(), cur_level);
}}"""
return textwrap.dedent(result).split('\n')
def gen_unwraps(flat_arguments: List[Argument]) -> Tuple[List[str], List[str]]:
arg_names = [a.name for a in flat_arguments]
arg_types = [a.type for a in flat_arguments]
tensors = [name for typ, name in zip(arg_types, arg_names) if is_tensor(typ)]
optional_tensors = [name for typ, name in zip(arg_types, arg_names) if is_optional_tensor(typ)]
unwraps = []
for tensor in tensors:
unwraps += unwrap_tensor(tensor)
for opt_tensor in optional_tensors:
unwraps += unwrap_optional_tensor(opt_tensor)
unwraps = '\n'.join(unwraps)
unwrapped_arg_list = []
for arg in arg_names:
if arg in tensors or arg in optional_tensors:
unwrapped_arg_list += [f'{arg}_value', f'{arg}_bdim']
else:
unwrapped_arg_list.append(arg)
return unwraps, unwrapped_arg_list
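# Illustrative sketch only (not part of the original source): for a schema whose
# only argument is a Tensor named "self", gen_unwraps above would emit C++ along
# the lines of
#
#   Tensor self_value;
#   optional<int64_t> self_bdim;
#   std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
#
# and an unwrapped_arg_list of ['self_value', 'self_bdim'].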
def get_aten_op_call(schema) -> str:
if schema.name.overload_name:
return f'ATEN_FN2({schema.name.name}, {schema.name.overload_name})'
return f'ATEN_FN({schema.name.name})'
def gen_case_where_all_bdims_are_none(flat_args: List[Argument], schema) -> str:
conditions = []
for arg in flat_args:
if not arg.type.is_tensor_like():
continue
conditions.append(f'!isBatchedAtLevel({arg.name}, cur_level)')
aten_op = get_aten_op_call(schema)
arg_names = [a.name for a in flat_args]
return f"""\
if ({' && '.join(conditions)}) {{
return {aten_op}({', '.join(arg_names)});
}}"""
def gen_returns(returns: List[Return]) -> str:
idx = 0
wrapped_returns = []
for ret in returns:
if is_tensor(ret.type):
wrapped_returns.append(f'makeBatched(std::get<{idx}>(results), std::get<{idx + 1}>(results), cur_level)')
idx += 2
elif is_vector_tensor(ret.type):
wrapped_returns.append(
f'makeBatchedVector(std::get<{idx}>(results), std::get<{idx + 1}>(results), cur_level)'
)
idx += 2
else:
wrapped_returns.append(f'std::get<{idx}>(results)')
idx += 1
if len(wrapped_returns) == 1:
wrapped_returns = f'return {wrapped_returns[0]};'
else:
wrapped_returns = f'return std::make_tuple({", ".join(wrapped_returns)});'
return wrapped_returns
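# Illustrative sketch only (not part of the original source): for a schema that
# returns a single Tensor, gen_returns yields
#
#   return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
#
# while a (Tensor, Tensor) return becomes a std::make_tuple(...) of two
# makeBatched(...) expressions over result indices 0/1 and 2/3.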
def accepts_at_least_one_tensor_input(schema):
for arg in schema.arguments.flat_all:
if arg.type.is_tensor_like():
return True
return False
def gen_vmap_inplace_plumbing(native_function: NativeFunction) -> Optional[str]:
schema = native_function.func
sig = DispatcherSignature.from_schema(schema)
returns = schema.returns
assert schema.kind() == SchemaKind.inplace
# Only support cases where all returns are Tensors or vector<Tensor>
if len(returns) == 0:
return None
if not all(is_tensor(ret.type) or is_vector_tensor(ret.type) for ret in returns):
return None
if not accepts_at_least_one_tensor_input(schema):
return None
unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all)
bdims_all_none_case = gen_case_where_all_bdims_are_none(schema.arguments.flat_all, schema)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
auto maybe_layer = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
int64_t cur_level = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
batch_rule({', '.join(unwrapped_arg_list)});
return {schema.arguments.flat_all[0].name};
}}"""
def gen_vmap_plumbing(native_function: NativeFunction) -> Optional[str]:
schema = native_function.func
sig = DispatcherSignature.from_schema(schema)
returns = schema.returns
# Only support cases where all returns are Tensors or vector<Tensor>
if len(returns) == 0:
return None
if not all(is_tensor(ret.type) or is_vector_tensor(ret.type) for ret in returns):
return None
if not accepts_at_least_one_tensor_input(schema):
return None
# in-place views need special handling
if native_function.tag == Tag.inplace_view:
return None
if schema.kind() == SchemaKind.inplace:
return gen_vmap_inplace_plumbing(native_function)
# Don't support these
if schema.kind() == SchemaKind.out:
return None
unwraps, unwrapped_arg_list = gen_unwraps(schema.arguments.flat_all)
bdims_all_none_case = gen_case_where_all_bdims_are_none(schema.arguments.flat_all, schema)
wrapped_returns = gen_returns(returns)
return f"""\
template <typename batch_rule_t, batch_rule_t batch_rule>
{sig.decl(name=schema.name.unambiguous_name() + '_generated_plumbing')} {{
c10::impl::ExcludeDispatchKeyGuard guard(kBatchedKey);
auto maybe_layer = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
int64_t cur_level = maybe_layer->layerId();
{textwrap.indent(bdims_all_none_case, " ")}
{textwrap.indent(unwraps, " ")}
auto results = batch_rule({', '.join(unwrapped_arg_list)});
{wrapped_returns}
}}"""
@dataclass(frozen=True)
class ComputeBatchRulePlumbing:
@method_with_native_function
def __call__(self, f: NativeFunction) -> Optional[str]:
result = gen_vmap_plumbing(f)
return result
def gen_all_vmap_plumbing(native_functions):
body = '\n'.join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions)))
return f"""
#pragma once
#include <ATen/Operators.h>
#include <functorch/csrc/PlumbingHelper.h>
#include <functorch/csrc/Constants.h>
namespace at {{ namespace functorch {{
{body}
}}}} // namespace at::functorch
"""
| 33.116822
| 117
| 0.695075
|
39aa3ebb047a59808f1501cdffba0b509bbfa967
| 4,679
|
pyw
|
Python
|
Section_04_code/PyQT4/Lib/site-packages/PyQt4/examples/animation/appchooser/appchooser.pyw
|
PacktPublishing/Python-Machine-Learning-Solutions-V-
|
8bb80a43a7c64032c25c1023faaa29bbfbd39d45
|
[
"MIT"
] | 1
|
2022-03-16T02:10:30.000Z
|
2022-03-16T02:10:30.000Z
|
Section_04_code/PyQT4/Lib/site-packages/PyQt4/examples/animation/appchooser/appchooser.pyw
|
wensincai/Python-Machine-Learning-Solutions-V-
|
130c9881757fa90bbb124d48ddd0c6c1136fa20c
|
[
"MIT"
] | null | null | null |
Section_04_code/PyQT4/Lib/site-packages/PyQt4/examples/animation/appchooser/appchooser.pyw
|
wensincai/Python-Machine-Learning-Solutions-V-
|
130c9881757fa90bbb124d48ddd0c6c1136fa20c
|
[
"MIT"
] | 2
|
2019-05-28T11:58:59.000Z
|
2020-09-23T17:21:19.000Z
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
import appchooser_rc
class Pixmap(QtGui.QGraphicsWidget):
clicked = QtCore.pyqtSignal()
def __init__(self, pix, parent=None):
super(Pixmap, self).__init__(parent)
self.orig = QtGui.QPixmap(pix)
self.p = QtGui.QPixmap(pix)
def paint(self, painter, option, widget):
painter.drawPixmap(QtCore.QPointF(), self.p)
def mousePressEvent(self, ev):
self.clicked.emit()
def setGeometry(self, rect):
super(Pixmap, self).setGeometry(rect)
if rect.size().width() > self.orig.size().width():
self.p = self.orig.scaled(rect.size().toSize())
else:
self.p = QtGui.QPixmap(self.orig)
def createStates(objects, selectedRect, parent):
for obj in objects:
state = QtCore.QState(parent)
state.assignProperty(obj, 'geometry', selectedRect)
parent.addTransition(obj.clicked, state)
def createAnimations(objects, machine):
for obj in objects:
animation = QtCore.QPropertyAnimation(obj, 'geometry', obj)
machine.addDefaultAnimation(animation)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
p1 = Pixmap(QtGui.QPixmap(':/digikam.png'))
p2 = Pixmap(QtGui.QPixmap(':/akregator.png'))
p3 = Pixmap(QtGui.QPixmap(':/accessories-dictionary.png'))
p4 = Pixmap(QtGui.QPixmap(':/k3b.png'))
p1.setGeometry(QtCore.QRectF(0.0, 0.0, 64.0, 64.0))
p2.setGeometry(QtCore.QRectF(236.0, 0.0, 64.0, 64.0))
p3.setGeometry(QtCore.QRectF(236.0, 236.0, 64.0, 64.0))
p4.setGeometry(QtCore.QRectF(0.0, 236.0, 64.0, 64.0))
scene = QtGui.QGraphicsScene(0, 0, 300, 300)
scene.setBackgroundBrush(QtCore.Qt.white)
scene.addItem(p1)
scene.addItem(p2)
scene.addItem(p3)
scene.addItem(p4)
window = QtGui.QGraphicsView(scene)
window.setFrameStyle(0)
window.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
window.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
window.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
machine = QtCore.QStateMachine()
machine.setGlobalRestorePolicy(QtCore.QStateMachine.RestoreProperties)
group = QtCore.QState(machine)
selectedRect = QtCore.QRect(86, 86, 128, 128)
idleState = QtCore.QState(group)
group.setInitialState(idleState)
objects = [p1, p2, p3, p4]
createStates(objects, selectedRect, group)
createAnimations(objects, machine)
machine.setInitialState(group)
machine.start()
window.resize(300, 300)
window.show()
sys.exit(app.exec_())
| 34.404412
| 77
| 0.684121
|
4957208efdc8591bcdaa1eb566b3ac3632cc5394
| 234
|
py
|
Python
|
Python/07 - Collections/Word Order.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/07 - Collections/Word Order.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/07 - Collections/Word Order.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
"""
Author: Eda AYDIN
"""
from collections import Counter
n = int(input())
word_list = []
for i in range(n):
word_list.append(input().strip())
count = Counter(word_list)
print(len(count))
print(*count.values())
| 14.625
| 38
| 0.628205
|
d0da1768efc7745ccde30bcd0c71c8c88ab91d5c
| 3,465
|
py
|
Python
|
configs/ttfnet/ttfnet_d53_beta04_2lr_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnet/ttfnet_d53_beta04_2lr_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnet/ttfnet_d53_beta04_2lr_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='TTFNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=None,
bbox_head=dict(
type='TTFHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
hm_head_conv_num=2,
wh_head_conv_num=2,
num_classes=81,
wh_offset_base=16,
wh_agnostic=True,
wh_gaussian=True,
shortcut_cfg=(1, 2, 3),
norm_cfg=dict(type='BN'),
alpha=0.54,
beta=0.4,
hm_weight=1.,
wh_weight=5.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=12,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttfnet53_beta04_2lr_2x'
load_from = None
resume_from = 'work_dirs/1908/0818_R4_ttf53_WHH_BETA04_2LR/ttfnet53_beta04_2lr_1x_0818_1638/epoch_9.pth'
workflow = [('train', 1)]
| 31.216216
| 104
| 0.6329
|
ca4cb09f7e0315df213cf92a10dff058f3f234cb
| 2,272
|
py
|
Python
|
CIRCUITPY/qix.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | null | null | null |
CIRCUITPY/qix.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | 1
|
2022-01-19T00:16:02.000Z
|
2022-01-26T03:43:34.000Z
|
CIRCUITPY/qix.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | null | null | null |
import time
import board
import displayio as dpio
import bitmaptools as bmt
from random import randint
from ButtonEvents import ButtonEvents
bmp = dpio.Bitmap(320, 240, 4)
pal = dpio.Palette(4)
pal[1] = 0xFFFF00
pal[2] = 0xFFA500
pal[3] = 0x00A5FF
class Qix :
def __init__(self, n = 10, color = 1) :
self.x0 = randint(0, 279)
self.y0 = randint(0, 199)
self.x1 = self.x0 + randint(0, 40)
self.y1 = self.y0 + randint(0, 40)
self.vx0 = randint(4, 10)
self.vy0 = randint(4, 10)
self.vx1 = randint(4, 10)
self.vy1 = randint(4, 10)
self.color = color
self.queue = [ ( self.x0, self.y0, self.x1, self.y1 ) ] * n
def update(self) :
self.x0 += self.vx0
self.y0 += self.vy0
self.x1 += self.vx1
self.y1 += self.vy1
if self.x0 < 0 : self.vx0 = randint(4, 10)
if self.y0 < 0 : self.vy0 = randint(4, 10)
if self.x1 < 0 : self.vx1 = randint(4, 10)
if self.y1 < 0 : self.vy1 = randint(4, 10)
if self.x0 >= 320 : self.vx0 = -randint(4, 10)
if self.y0 >= 240 : self.vy0 = -randint(4, 10)
if self.x1 >= 320 : self.vx1 = -randint(4, 10)
if self.y1 >= 240 : self.vy1 = -randint(4, 10)
if self.x0 < self.x1 + 50 : self.vx0 += 1; self.vx1 -= 1
if self.y0 < self.y1 + 50 : self.vy0 += 1; self.vy1 -= 1
if self.x1 < self.x0 + 50 : self.vx1 += 1; self.vx0 -= 1
if self.y1 < self.y0 + 50 : self.vy1 += 1; self.vy0 -= 1
self.x0 = max(0, min(self.x0, 319))
self.y0 = max(0, min(self.y0, 239))
self.x1 = max(0, min(self.x1, 319))
self.y1 = max(0, min(self.y1, 239))
p = self.queue.pop()
bmt.draw_line(bmp, *p, value = 0)
p = (self.x0, self.y0, self.x1, self.y1)
self.queue.insert(0, p)
bmt.draw_line(bmp, *p, value = self.color)
q1 = Qix(randint(10, 20), 1)
q2 = Qix(randint(10, 20), 2)
q3 = Qix(randint(10, 20), 3)
disp = board.DISPLAY
g = dpio.Group()
tg = dpio.TileGrid(bmp, pixel_shader = pal)
g.append(tg)
disp.show(g)
be = ButtonEvents()
while True :
time.sleep(0.05)
b = be.buttons()
if b != 0 : break
q1.update()
q2.update()
q3.update()
be.deinit()
| 28.4
| 67
| 0.543574
|
39ade11c8e2d161381d287f74a0998d7441b5fad
| 449
|
py
|
Python
|
school/migrations/0008_auto_20200816_0303.py
|
threecoolcat/ThreeCoolCat
|
805f37b4cc68dce768356c0042663d0659abd6cb
|
[
"MIT"
] | 6
|
2021-08-07T10:51:25.000Z
|
2022-02-25T04:59:02.000Z
|
school/migrations/0008_auto_20200816_0303.py
|
threecoolcat/ThreeCoolCat
|
805f37b4cc68dce768356c0042663d0659abd6cb
|
[
"MIT"
] | null | null | null |
school/migrations/0008_auto_20200816_0303.py
|
threecoolcat/ThreeCoolCat
|
805f37b4cc68dce768356c0042663d0659abd6cb
|
[
"MIT"
] | 2
|
2021-09-15T10:22:05.000Z
|
2022-01-19T07:12:04.000Z
|
# Generated by Django 3.0.7 on 2020-08-16 03:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0007_course_period_amount'),
]
operations = [
migrations.AlterField(
model_name='teacher',
name='photo',
field=models.ImageField(db_column='photo', default='', upload_to='teacherphoto', verbose_name='职位'),
),
]
| 23.631579
| 112
| 0.616927
|
c10f4db46d1075ed422b51ce37f14981a173d551
| 2,342
|
py
|
Python
|
asimtbm/tables/zones.py
|
blakerosenthal/asimtbm
|
367b8ac2b96879b4cbc6fcf970fc03fe936e06c1
|
[
"BSD-3-Clause"
] | 1
|
2020-06-12T00:49:59.000Z
|
2020-06-12T00:49:59.000Z
|
asimtbm/tables/zones.py
|
RSGInc/asimtbm
|
367b8ac2b96879b4cbc6fcf970fc03fe936e06c1
|
[
"BSD-3-Clause"
] | 6
|
2019-09-23T22:34:51.000Z
|
2019-12-31T22:06:25.000Z
|
asimtbm/tables/zones.py
|
RSGInc/asimtbm
|
367b8ac2b96879b4cbc6fcf970fc03fe936e06c1
|
[
"BSD-3-Clause"
] | 1
|
2019-08-30T19:23:11.000Z
|
2019-08-30T19:23:11.000Z
|
import logging
import os
import pandas as pd
from activitysim.core import inject, config, tracing
logger = logging.getLogger(__name__)
ZONE_LABEL = 'zone'
TABLES_YAML = 'tables.yaml'
TABLE_FILENAMES_KEY = 'aggregate_zone_file_names'
@inject.table()
def zones():
"""ActivitySim pipeline table of raw zone data.
Reads zone filenames from tables.yaml and combines
into single table which is then registered to the pipeline.
Each zone file must be the same length and given a 'zone'
index label. If no 'zone' column is found, row numbers will
be used for the zone index.
"""
table_settings = config.read_model_settings(TABLES_YAML)
zone_tables = read_zone_tables(table_settings)
zones_df = combine_zone_tables(zone_tables)
inject.add_table('zones', zones_df)
return zones_df
def read_zone_tables(table_settings):
logger.info('reading tables from configs...')
table_filenames = table_settings.get(TABLE_FILENAMES_KEY)
tables = [read_zone_indexed_csv_file(f) for f in table_filenames]
logger.info('finished reading tables.')
return tables
def combine_zone_tables(zone_tables):
logger.info('building aggregate zones table ...')
# verify that every zone file contains the same zones
comparison_index = zone_tables[0].index
for table in zone_tables:
if not table.index.equals(comparison_index):
raise RuntimeError(
"table with columns %s does not match other zones" % table.columns.values)
combined_zones_df = pd.concat(zone_tables, axis=1)
logger.info('finished building aggregate zones table with %d zones and columns: %s',
len(combined_zones_df.index),
combined_zones_df.columns.values)
return combined_zones_df
def read_zone_indexed_csv_file(file_name):
logger.info('reading file \'%s\'' % file_name)
fpath = config.data_file_path(file_name, mandatory=True)
zone_df = pd.read_csv(fpath, header=0, comment='#')
if ZONE_LABEL in zone_df.columns:
zone_index = ZONE_LABEL # str
else:
# use row numbers for zone ids. convert to 1-based zone ids simply by adding 1
zone_index = zone_df.index + 1 # Series
zone_df.set_index(zone_index, drop=True, inplace=True)
zone_df.index.name = ZONE_LABEL
return zone_df
| 30.025641
| 90
| 0.716055
|
13754ed69e20e144d127b4acf2ec810fa7423bb5
| 9,125
|
py
|
Python
|
multi-task-pretrain/eval_b_classify.py
|
haifangong/CMSA-MTPT-4-MedicalVQA
|
56bdb03820ccf86d164ada6f29cb09f9fa35657b
|
[
"MIT"
] | 11
|
2021-05-28T10:12:30.000Z
|
2022-02-09T04:27:31.000Z
|
multi-task-pretrain/eval_b_classify.py
|
haifangong/CMSA-MTPT-4-MedicalVQA
|
56bdb03820ccf86d164ada6f29cb09f9fa35657b
|
[
"MIT"
] | 4
|
2021-05-31T12:57:04.000Z
|
2022-02-14T02:32:06.000Z
|
multi-task-pretrain/eval_b_classify.py
|
haifangong/CMSA-MTPT-4-MedicalVQA
|
56bdb03820ccf86d164ada6f29cb09f9fa35657b
|
[
"MIT"
] | 1
|
2021-05-28T14:47:24.000Z
|
2021-05-28T14:47:24.000Z
|
import socket
import argparse
from datetime import datetime
import time
import os
import glob
import torch
from torchvision import transforms
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders import abdomen_dataset, brain_tumor_dataset, chest_xray_dataset
from dataloaders import custom_transforms as trforms
from model.ResNet import ResNet18, ResNet34, ResNet50, ResNet101
from model.classifier import Classifier
from model.segment_decoder import Decoder
from model.convert import Convert
from model.base_model import build_BAN
import utils
from dataloaders import dictionary
import numpy as np
def softmax(x):
# return np.exp(x) / sum(np.exp(x))
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
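# Usage sketch (illustrative values, not from the source):
#   softmax(np.array([1.0, 2.0, 3.0])) -> array([~0.090, ~0.245, ~0.665])
# Subtracting the max keeps np.exp from overflowing for large logits.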
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', type=str, default='0')
# Model setting
parser.add_argument('-backbone', type=str, default='resnet34')
parser.add_argument('-input_size', type=int, default=224)
parser.add_argument('-dictionary_path', type=str, default='/data1/chenguanqi/Medical-VQA/MICCAI19-MedVQA/data_RAD/dictionary.pkl')
parser.add_argument('-RAD_dir', type=str, default='/data1/chenguanqi/Medical-VQA/MICCAI19-MedVQA/data_RAD')
parser.add_argument('-v_dim', type=int, default=1024)
# Joint representation C dimension
parser.add_argument('--num_hid', type=int, default=1024,
help='dim of joint semantic features')
# BAN - Bilinear Attention Networks
parser.add_argument('--gamma', type=int, default=2,
help='glimpse in Bilinear Attention Networks')
# Choices of RNN models
parser.add_argument('-rnn', type=str, default='LSTM', choices=['LSTM', 'GRU'],
help='the RNN we use')
parser.add_argument('--op', type=str, default='c',
help='concatenated 600-D word embedding')
parser.add_argument('--question_len', default=12, type=int, metavar='N',
help='maximum length of input question')
parser.add_argument('--tfidf', type=bool, default=True,
help='tfidf word embedding?')
# Activation function + dropout for classification module
parser.add_argument('--activation', type=str, default='relu', choices=['relu'],
help='the activation to use for final classifier')
parser.add_argument('--dropout', default=0.5, type=float, metavar='dropout',
help='dropout of rate of final classifier')
parser.add_argument('--clip_norm', default=.25, type=float, metavar='NORM',
help='clip threshold of gradients')
# Training setting
parser.add_argument('-seed', type=int, default=1234)
parser.add_argument('-batch_size', type=int, default=10)
parser.add_argument('-nepochs', type=int, default=100)
parser.add_argument('-resume_epoch', type=int, default=100)
parser.add_argument('-train_fold', type=str, default='BrainTumor_ClipGrad-resnet34-biowordvec-SelfAtt')
parser.add_argument('-run_id', type=int, default=-1)
parser.add_argument('-T', type=int, default=2)
# Optimizer setting
parser.add_argument('-lr', type=float, default=1e-2)
parser.add_argument('-weight_decay', type=float, default=5e-4)
parser.add_argument('-momentum', type=float, default=0.9)
parser.add_argument('-update_lr_every', type=int, default=20)
parser.add_argument('-save_every', type=int, default=5)
parser.add_argument('-log_every', type=int, default=25)
parser.add_argument('-emb_init', type=str, default='biowordvec', choices=['glove', 'biowordvec', 'biosentvec'])
parser.add_argument('-self_att', action='store_true', default=False, help='Use Self Attention?')
parser.add_argument('-result_fold', type=str, default='results')
return parser.parse_args()
def adjust_learning_rate(optimizer, lr_):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
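# Usage sketch (assumption, not from the source): decay the learning rate by
# a factor of 10 with this helper, e.g.
#   adjust_learning_rate(optimizer, args.lr * 0.1)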
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
dictionary_ = dictionary.Dictionary.load_from_file(args.dictionary_path)
if args.backbone == 'resnet18':
backbone = ResNet18(nInputChannels=3, os=32, pretrained=True)
elif args.backbone == 'resnet34':
backbone = ResNet34(nInputChannels=3, os=32, pretrained=True)
elif args.backbone == 'resnet50':
backbone = ResNet50(nInputChannels=3, os=32, pretrained=True)
elif args.backbone == 'resnet101':
backbone = ResNet101(nInputChannels=3, os=32, pretrained=True)
else:
raise NotImplementedError
# number of category for task
a_classes = 14 # segmentation
b_classes = 3 # if task is classification else 4 (for segmentation)
c_classes = 2 # classification
if args.backbone == 'resnet18' or args.backbone == 'resnet34':
filters = [64, 64, 128, 256, 512]
elif args.backbone == 'resnet50' or args.backbone == 'resnet101':
filters = [64, 64*4, 128*4, 256*4, 512*4]
else:
raise NotImplementedError
b_decoder = Classifier(in_channels=filters[-1], n_classes=b_classes)
convert = Convert(image_size=args.input_size, backbone_output_dim=filters[-1], os=32, v_dim=args.v_dim)
ban = build_BAN(dictionary=dictionary_, args=args)
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
if args.run_id >= 0:
run_id = args.run_id
save_dir = os.path.join(save_dir_root, 'run', args.train_fold, 'run_'+str(run_id))
backbone_resume_path = os.path.join(save_dir, 'models', 'backbone_epoch-' + str(args.resume_epoch - 1) + '.pth')
b_decoder_resume_path = os.path.join(save_dir, 'models', 'b_decoder_epoch-' + str(args.resume_epoch - 1) + '.pth')
convert_resume_path = os.path.join(save_dir, 'models', 'convert_epoch-' + str(args.resume_epoch - 1) + '.pth')
ban_resume_path = os.path.join(save_dir, 'models', 'ban_epoch-' + str(args.resume_epoch - 1) + '.pth')
print('Initializing weights from: {}, epoch: {}...'.format(save_dir, args.resume_epoch))
backbone.load_state_dict(torch.load(backbone_resume_path, map_location=lambda storage, loc: storage))
b_decoder.load_state_dict(torch.load(b_decoder_resume_path, map_location=lambda storage, loc: storage))
convert.load_state_dict(torch.load(convert_resume_path, map_location=lambda storage, loc: storage))
ban.load_state_dict(torch.load(ban_resume_path, map_location=lambda storage, loc: storage))
torch.cuda.set_device(device=0)
backbone.cuda()
b_decoder.cuda()
convert.cuda()
ban.cuda()
composed_transforms_ts = transforms.Compose([
trforms.FixedResize(size=(args.input_size, args.input_size)),
trforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
trforms.ToTensor()])
b_valset = brain_tumor_dataset.BrainTumorDataset(dictionary=dictionary_, question_len=12, task='classification', mode='val', transform=composed_transforms_ts, return_size=False, seed=args.seed)
b_valloader = DataLoader(b_valset, batch_size=1, shuffle=False, num_workers=2)
# validation
backbone.eval()
b_decoder.eval()
convert.eval()
ban.eval()
b_acc = 0.0
ques_acc = 0.0
for ii, b_sample_batched in enumerate(b_valloader):
b_img, b_label = b_sample_batched['image'], b_sample_batched['label']
b_img, b_label = b_img.cuda(), b_label.cuda()
b_question, b_question_label = b_sample_batched['question'], b_sample_batched['question_label']
b_question, b_question_label = b_question.cuda(), b_question_label.cuda()
b_feats = backbone.forward(b_img)
b_outs = b_decoder.forward(b_feats[-1])
b_convert = convert.forward(b_feats[-1])
b_ban_out = ban.forward(b_convert, b_question)
b_ban_out = ban.classify(b_ban_out)
acc = utils.cal_acc(b_outs, b_label)
b_acc += acc
acc = utils.cal_acc(b_ban_out, b_question_label)
ques_acc += acc
b_acc /= len(b_valset)
ques_acc /= len(b_valset)
print('Validation:')
print('epoch: %d, b_images: %d b_acc: %.4f ques_acc: %.4f' % (args.resume_epoch, len(b_valset), b_acc, ques_acc))
result_dir = os.path.join(save_dir_root, args.result_fold, args.train_fold, 'run_'+str(args.run_id))
if not os.path.exists(result_dir):
os.makedirs(result_dir)
result_file = open(os.path.join(result_dir, 'result.txt'), 'w')
result_file.write('epoch: %d, b_images: %d b_acc: %.4f ques_acc: %.4f' % (args.resume_epoch, len(b_valset), b_acc, ques_acc))
if __name__ == '__main__':
args = get_arguments()
main(args)
| 44.730392
| 198
| 0.679562
|
2e8b59dbfdcc92b7b1c3d157791d90a9ceba27fb
| 7,104
|
py
|
Python
|
tools/SDKTool/src/ui/debug/ui_debug.py
|
Passer-D/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 1,210
|
2020-08-18T07:57:36.000Z
|
2022-03-31T15:06:05.000Z
|
tools/SDKTool/src/ui/debug/ui_debug.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 37
|
2020-08-24T02:48:38.000Z
|
2022-01-30T06:41:52.000Z
|
tools/SDKTool/src/ui/debug/ui_debug.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 275
|
2020-08-18T08:35:16.000Z
|
2022-03-31T15:06:07.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import logging
import os
import time
import cv2
from .abstract_debug import AbstractDebug
from ..canvas.data_source import DataSource
from ..tree.project_data_manager import ProjectDataManager
from ...project.project_manager import g_project_manager
from ..utils import set_log_text
from ...common.define import DEBUG_UI_CMD, TASK_PATH, REFER_PATH, SDK_BIN_PATH, TBUS_PATH
from ...communicate.agent_api_mgr import AgentAPIMgr
from ...communicate.protocol import common_pb2
from ...context.app_context import AppContext
from ...subprocess_service.subprocess_service_manager import backend_service_manager as bsa
from ..dialog.tip_dialog import show_warning_tips
from ...common.utils import backend_service_monitor
from ..tree.tree_manager import save_current_data
logger = logging.getLogger('sdktool')
class UIDebug(AbstractDebug):
SERVICE_NAME = 'ui_debug'
def __init__(self):
AbstractDebug.__init__(self)
self.program = DEBUG_UI_CMD
self.api = None
self._last_fps = 10
self.data_source = None
def initialize(self):
""" Initialize
Override the base class initialize() and set up tbus.
:return:
"""
self.data_source = DataSource()
data_mgr = ProjectDataManager()
if not data_mgr.is_ready():
show_warning_tips('please config project first')
return False
project_config_path = self.get_project_path()
task_path = os.path.join(project_config_path, TASK_PATH)
refer_path = os.path.join(project_config_path, REFER_PATH)
if not os.path.exists(refer_path):
refer_path = None
self.api = AgentAPIMgr()
self.api.initialize(task_path, refer_path, self_addr="SDKToolAddr", cfg_path=TBUS_PATH)
self.set_enabled(True)
return True
def send_frame(self, frame=None):
""" SendFrame
重写基类的send_frame函数,输入为图像帧,将其发送给UIRecognize进程
:param frame:
:return:
"""
src_img_dict = self._generate_img_dict(frame)
ret = self.api.send_ui_src_image(src_img_dict)
if ret is False:
logging.error('send frame failed')
return False
return True
def recv_result(self):
""" RecvResult
Override the base class recv_result(): receive recognition results from the UIRecognize process and return the corresponding result image.
:return:
"""
ui_result = self.api.recv_ui_result()
if ui_result is None:
logger.debug("get UI result failed")
return None, False
return self._proc_ui_result(ui_result)
def start_test(self):
"""开始测试,与测试按钮绑定,点击测试按钮则执行此函数
"""
# Each time the test button is clicked, re-run the initialization logic of every module
if not self.initialize():
logger.error("initialize failed, please check")
return False
try:
if not save_current_data():
show_warning_tips('保存数据失败,无法启动功能')
return
current_path = os.getcwd()
os.chdir(SDK_BIN_PATH)
time.sleep(1)
prj_file_path = g_project_manager.get_project_property_file()
run_program = "%s/%s cfgpath %s" % (SDK_BIN_PATH, self.program, prj_file_path)
is_ok, desc = bsa.start_service(service_name=self.SERVICE_NAME, run_programs=run_program,
process_param_type=bsa.SUBPROCESS_SHELL_TYPE,
callback_func=backend_service_monitor)
if not is_ok:
logger.error(desc)
return False
os.chdir(current_path)
self._last_fps = self.timer.get_fps()
self.timer.set_fps(1)
self.timer.start()
self.set_enabled(True)
app_ctx = AppContext()
app_ctx.set_info("phone", False)
except RuntimeError as err:
logger.error("start test failed:%s", err)
set_log_text("start test failed:{}".format(err))
return False
return True
def stop_test(self):
""" 停止测试
:return:
"""
super(UIDebug, self).stop_test()
self.timer.set_fps(self._last_fps)
logger.info('stop service:%s', self.SERVICE_NAME)
is_ok, desc = bsa.stop_service(service_name=self.SERVICE_NAME)
if is_ok:
logger.info('stop service %s success', self.SERVICE_NAME)
else:
logger.error('stop service %s failed, %s', self.SERVICE_NAME, desc)
@staticmethod
def _proc_ui_result(ui_result):
""" ProcUIResult
Draw the results returned by UIRecognize onto the image
:param ui_result:
:return:
"""
ret = False
if ui_result is None:
logger.error('ui_result is None')
return None, ret
frame = ui_result['image']
if frame is None:
logger.error('image is None')
return None, ret
# logger.debug("proc ui result #############frame is {}#############".format(frame))
for action in ui_result['actions']:
ui_type = action.get('type')
logger.info('UI type: %s', ui_type)
if ui_type == common_pb2.PB_UI_ACTION_CLICK:
cv2.putText(frame, "click", (action['points'][0]['x'], action['points'][0]['y']),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
cv2.circle(frame, (action['points'][0]['x'], action['points'][0]['y']), 8, (0, 0, 255), -1)
logger.info('action: click (%s, %s)', action['points'][0]['x'], action['points'][0]['y'])
ret = True
elif ui_type == common_pb2.PB_UI_ACTION_DRAG:
cv2.putText(frame, "drag", (action['points'][0]['x'], action['points'][0]['y']),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
cv2.line(frame, (action['points'][0]['x'], action['points'][0]['y']),
(action['points'][1]['x'], action['points'][1]['y']), (0, 0, 255), 3)
logger.info('action: drag (%s, %s)-->(%s, %s)', action['points'][0]['x'],
action['points'][0]['y'], action['points'][1]['x'], action['points'][1]['y'])
ret = True
return frame, ret
def _generate_img_dict(self, src_img):
""" GenerateImgDict
Build the dict describing the source image to be sent
:param src_img:
:return:
"""
src_img_dict = dict()
src_img_dict['frameSeq'] = self.frame_seq
self.frame_seq += 1
src_img_dict['image'] = src_img
src_img_dict['width'] = src_img.shape[1]
src_img_dict['height'] = src_img.shape[0]
src_img_dict['deviceIndex'] = 1
return src_img_dict
| 35.343284
| 111
| 0.594454
|