Dataset schema (dtype and value/length range per column; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 07984691c22bd4cb824530a6a0921c881974c3e7 | size: 8,235 | ext: py | lang: Python
max_stars: gsum/datasets.py @ ckoerber/gsum (4174bd622e7a1b829baa1cf1d0857a70cec27537), licenses ["MIT"], count 7, events 2019-04-25T02:28:51.000Z to 2022-01-27T22:15:31.000Z
max_issues: gsum/datasets.py @ jordan-melendez/gsum (a9647d60eb5ef971caad88f97df8f1e9cc5286c2), licenses ["MIT"], count 2, events 2021-07-29T20:12:10.000Z to 2021-08-10T07:25:26.000Z
max_forks: gsum/datasets.py @ jordan-melendez/gsum (a9647d60eb5ef971caad88f97df8f1e9cc5286c2), licenses ["MIT"], count 2, events 2019-07-01T07:56:22.000Z to 2021-11-10T15:36:25.000Z
content:
import numpy as np
import scipy.stats as stats
from sklearn.gaussian_process.kernels import RBF
from sklearn.utils import check_random_state
from . import cartesian, partials
def make_gaussian_partial_sums(
X, orders=5, kernel=None, mean=None, ratio=0.3,
ref=1., nugget=0, random_state=0, allow_singular=True
):
R"""
Generates a dataset of Gaussian partial sums at the input points X.
Parameters
----------
X : array, shape = (n_samples, n_features)
The input locations at which to sample the Gaussian process coefficients
orders : int or array, optional (default = 5)
The orders included in the partial sum. If an int is provided, then the partial sums from [0, 1, ..., orders-1]
are generated. If orders is an array, then only the partial sums in `orders` are returned, assuming that any
order not in `orders` does not contribute to the sum (i.e. its coefficient is zero).
kernel : callable
The kernel specifying the covariance function of the GP.
If None is passed, the kernel `RBF(0.5)` is used as default.
mean : callable
The mean function of the series coefficients
ratio : float or callable
The ratio in the geometric sum.
ref : float or callable
The overall scale factor of the geometric sum
nugget : float, optional (default = 0)
Value added to the diagonal of the covariance matrix.
Larger values correspond to increased noise level in the observations.
This can also prevent potential numerical issues, by
ensuring that the calculated values form a positive definite matrix.
random_state : int, RandomState instance or None, optional (default = 0)
The generator used to draw the coefficient samples. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
allow_singular : bool, optional (default = True)
Whether to allow a singular covariance matrix.
Returns
-------
y : array
The partial sums evaluated at the input points `X`.
"""
if kernel is None:
kernel = RBF(0.5)
if mean is None:
def mean(a):
return np.zeros(a.shape[0])
if isinstance(orders, int):
orders = np.arange(orders)
if callable(ratio):
ratio = ratio(X)
if callable(ref):
ref = ref(X)
m = mean(X)
K = kernel(X)
K += nugget * np.eye(K.shape[0])
dist = stats.multivariate_normal(mean=m, cov=K, allow_singular=allow_singular)
coeffs = dist.rvs(len(orders), random_state=random_state).T
y = partials(coeffs=coeffs, ratio=ratio, ref=ref, orders=orders)
return y
def make_gaussian_partial_sums_uniform(
n_samples=100, n_features=1, orders=5, kernel=None, mean=None, ratio=0.3, ref=1.,
nugget=0, random_state=0, allow_singular=True
):
R"""
Generates a dataset of Gaussian partial sums at random input locations.
The input X is randomly sampled uniformly from [0, 1] in n_features dimensions.
Parameters
----------
n_samples : int, optional (default = 100)
The total number of input points to sample.
n_features : int, optional (default = 1)
The number of features.
orders : int or array, optional (default = 5)
The orders included in the partial sum. If an int is provided, then the partial sums from [0, 1, ..., orders-1]
are generated. If orders is an array, then only the partial sums in `orders` are returned, assuming that any
order not in `orders` does not contribute to the sum (i.e. its coefficient is zero).
kernel : callable
The kernel specifying the covariance function of the GP.
If None is passed, the kernel `RBF(0.5)` is used as default.
mean : callable
The mean function of the series coefficients
ratio : float or callable
The ratio in the geometric sum.
ref : float or callable
The overall scale factor of the geometric sum
nugget : float, optional (default = 0)
Value added to the diagonal of the covariance matrix.
Larger values correspond to increased noise level in the observations.
This can also prevent potential numerical issues, by
ensuring that the calculated values form a positive definite matrix.
random_state : int, RandomState instance or None, optional (default = 0)
The generator used to draw the coefficient samples. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
allow_singular : bool, optional (default = True)
Whether to allow a singular covariance matrix.
Returns
-------
X : array, shape = (n_samples, n_features)
The input points.
y : array, shape = (n_samples,)
The response values.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = make_gaussian_partial_sums(
X=X, orders=orders, kernel=kernel, mean=mean, ratio=ratio, ref=ref,
nugget=nugget, random_state=random_state, allow_singular=allow_singular
)
return X, y
def make_gaussian_partial_sums_on_grid(
n_samples=100, n_features=1, orders=5, kernel=None, mean=None, ratio=0.3, ref=1.,
nugget=0, random_state=0, allow_singular=True
):
R"""
Generates a dataset of Gaussian partial sums on a full grid.
The input X consists of n_samples evenly spaced points in [0, 1], arranged on a full grid in n_features dimensions.
Parameters
----------
n_samples : int, optional (default = 100)
The number of samples from each feature dimension.
n_features : int, optional (default = 1)
The number of features.
orders : int or array, optional (default = 5)
The orders included in the partial sum. If an int is provided, then the partial sums from [0, 1, ..., orders-1]
are generated. If orders is an array, then only the partial sums in `orders` are returned, assuming that any
order not in `orders` does not contribute to the sum (i.e. its coefficient is zero).
kernel : callable
The kernel specifying the covariance function of the GP.
If None is passed, the kernel `RBF(0.5)` is used as default.
mean : callable
The mean function of the series coefficients
ratio : float or callable
The ratio in the geometric sum.
ref : float or callable
The overall scale factor of the geometric sum
nugget : float, optional (default = 0)
Value added to the diagonal of the covariance matrix.
Larger values correspond to increased noise level in the observations.
This can also prevent potential numerical issues, by
ensuring that the calculated values form a positive definite matrix.
random_state : int, RandomState instance or None, optional (default = 0)
The generator used to draw the coefficient samples. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
allow_singular : bool, optional (default = True)
Whether to allow a singular covariance matrix.
Returns
-------
X : array, shape = (n_samples ** n_features, n_features)
The input points.
y : array, shape = (n_samples ** n_features,)
The response values.
"""
x = np.linspace(0, 1, n_samples)
if n_features > 1:
        # build the full tensor-product grid: one copy of the 1-D grid per feature
        # (the previous comprehension shadowed `x` with the loop index and discarded the grid)
        X = cartesian(*[x for _ in range(n_features)])
else:
X = x[:, None]
y = make_gaussian_partial_sums(
X=X, orders=orders, kernel=kernel, mean=mean, ratio=ratio, ref=ref,
nugget=nugget, random_state=random_state, allow_singular=allow_singular
)
return X, y
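A brief usage sketch, added by the editor and not part of the archived file; it assumes the module is importable as gsum.datasets and follows the docstrings above rather than a verified run:

from gsum.datasets import make_gaussian_partial_sums_uniform  # assumed import path

# 50 uniform 1-D inputs; partial sums for orders 0..3 with expansion ratio 0.5
X, y = make_gaussian_partial_sums_uniform(n_samples=50, n_features=1, orders=4, ratio=0.5)
print(X.shape)  # (50, 1); y collects the corresponding partial sums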
avg_line_length: 43.115183 | max_line_length: 119 | alphanum_fraction: 0.676381

hexsha: 6a92494db1a1e81769e39355ddd6ed7c33df6420 | size: 760 | ext: py | lang: Python
max_stars: tests/zoomus/components/phone/test_calling_plans.py @ pjhinton/zoomus (0fb4d5f5fd24c35ecaf4035332d0fb41c4c77068), licenses ["Apache-2.0"], count 1, events 2021-08-19T11:07:06.000Z to 2021-08-19T11:07:06.000Z
max_issues: tests/zoomus/components/phone/test_calling_plans.py @ pjhinton/zoomus (0fb4d5f5fd24c35ecaf4035332d0fb41c4c77068), licenses ["Apache-2.0"], count 1, events 2021-03-23T11:27:37.000Z to 2021-03-23T11:27:37.000Z
max_forks: tests/zoomus/components/phone/test_calling_plans.py @ pjhinton/zoomus (0fb4d5f5fd24c35ecaf4035332d0fb41c4c77068), licenses ["Apache-2.0"], count 2, events 2021-03-12T05:44:17.000Z to 2022-03-03T12:27:06.000Z
content:
import unittest
from zoomus import components
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CallingPlansV2TestCase))
return suite
class CallingPlansV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.phone.PhoneComponentV2(
base_uri="http://example.com", config={"token": "token"},
)
@responses.activate
def test_numbers(self):
responses.add(
responses.GET,
"http://example.com/phone/calling_plans",
headers={"Authorization": "Bearer token"},
)
self.component.calling_plans()
if __name__ == "__main__":
unittest.main()
avg_line_length: 23.030303 | max_line_length: 69 | alphanum_fraction: 0.646053

hexsha: e15549371bd96bb16a94bfb5c8adae9a0feb40bd | size: 896 | ext: py | lang: Python
max_stars: Pygame/Bees/bee.py @ kasztp/python-lessons (2a159ad5e1186c749b96c5d0ede45b7142c6bbb5), licenses ["MIT"], count 35, events 2015-05-18T08:08:41.000Z to 2022-03-07T09:38:02.000Z
max_issues: Pygame/Bees/bee.py @ kasztp/python-lessons (2a159ad5e1186c749b96c5d0ede45b7142c6bbb5), licenses ["MIT"], count 1, events 2021-09-29T02:08:26.000Z to 2021-09-29T02:08:26.000Z
max_forks: Pygame/Bees/bee.py @ kasztp/python-lessons (2a159ad5e1186c749b96c5d0ede45b7142c6bbb5), licenses ["MIT"], count 40, events 2015-04-28T00:38:54.000Z to 2022-02-13T14:18:34.000Z
content:
import pygame
from util import loadImages
class Bee(pygame.sprite.Sprite):
images = []
imagesRotated = []
@staticmethod
def loadImages():
Bee.images = loadImages('bee1.png', 'bee2.png', 'bee3.png')
Bee.imagesRotated = Bee.images
def __init__(self, screenRect):
pygame.sprite.Sprite.__init__(self)
self.image = self.images[0]
imgRect = self.image.get_rect()
self.rect = imgRect.move(screenRect.centerx - imgRect.centerx,
screenRect.centery - imgRect.centery)
self.animIdx = 0
def update(self):
self.animIdx = (self.animIdx + 1) % len(self.images)
self.image = self.imagesRotated[self.animIdx]
def setAngle(self, angle):
Bee.imagesRotated = []
for image in Bee.images:
Bee.imagesRotated.append(pygame.transform.rotate(image, angle))
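An editor-added, hedged usage sketch for the sprite above; it assumes the bee1/bee2/bee3 PNG assets exist and that util.loadImages returns pygame Surfaces, as the class expects:

import pygame
from bee import Bee  # assumed module name for the file above

pygame.init()
screen = pygame.display.set_mode((640, 480))
Bee.loadImages()
bee = Bee(screen.get_rect())
clock = pygame.time.Clock()

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    bee.update()                      # advance the animation frame
    screen.fill((0, 0, 0))
    screen.blit(bee.image, bee.rect)
    pygame.display.flip()
    clock.tick(30)
pygame.quit()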
avg_line_length: 32 | max_line_length: 75 | alphanum_fraction: 0.621652

hexsha: a41018dd590dcf4383755cdf7eaaec7c346afafa | size: 604 | ext: py | lang: Python
max_stars: hysia/core/hardware/cputil.py @ fossabot/Video-to-Online-Platform (46019562f072a5dc2a92684986411d7f88758882), licenses ["Apache-2.0"], count 82, events 2019-10-04T05:40:45.000Z to 2020-03-14T06:40:02.000Z
max_issues: hysia/core/hardware/cputil.py @ wangyongjie-ntu/Video-to-Online-Platform (b1230c8f702487225566b5be13947bd6f7904556), licenses ["Apache-2.0"], count 9, events 2020-06-04T19:31:59.000Z to 2021-01-10T02:32:02.000Z
max_forks: hysia/core/hardware/cputil.py @ wangyongjie-ntu/Video-to-Online-Platform (b1230c8f702487225566b5be13947bd6f7904556), licenses ["Apache-2.0"], count 24, events 2019-10-04T05:46:46.000Z to 2020-05-30T05:22:32.000Z
content:
# 2018-9-30
# Author: Wang Yongjie
# Email: yongjie.wang@ntu.edu.sg
import os
import psutil
class cpu_usage(object):
def __init__(self):
self.cpu_num = 0
self.cpu_frequency = 0
self.cpu_percent = 0
self.cpu_info()
def cpu_info(self):
self.cpu_num = psutil.cpu_count()
        self.cpu_frequency = psutil.cpu_freq().current  # psutil.cpu_freq() returns a namedtuple
self.cpu_percent = psutil.cpu_percent()
'''
if __name__ == "__main__":
test = cpu_usage()
print(test.cpu_num)
print(test.cpu_frequency)
print(test.cpu_percent)
'''
avg_line_length: 23.230769 | max_line_length: 96 | alphanum_fraction: 0.652318

hexsha: 1ee8ee434287fd43638a8c027dd7927599baf207 | size: 94 | ext: py | lang: Python
max_stars: Pacotes/ex024.py @ TonyRio/Python-Exercicios (8a72d1b12418c6485794dae184425df0daf098bb), licenses ["MIT"], count null, events null
max_issues: Pacotes/ex024.py @ TonyRio/Python-Exercicios (8a72d1b12418c6485794dae184425df0daf098bb), licenses ["MIT"], count null, events null
max_forks: Pacotes/ex024.py @ TonyRio/Python-Exercicios (8a72d1b12418c6485794dae184425df0daf098bb), licenses ["MIT"], count null, events null
content:
cid = str (input('em que cidade vai viajar ? :')).strip()
print(cid.upper()[:5] == 'SANTO')
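A short, editor-added illustration of the check above with hypothetical inputs:

print('Santo André'.strip().upper()[:5] == 'SANTO')      # True
print('Rio de Janeiro'.strip().upper()[:5] == 'SANTO')   # False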
avg_line_length: 31.333333 | max_line_length: 59 | alphanum_fraction: 0.585106

hexsha: c3cec373e708cccecf2d8848e10e4040f5e4550f | size: 1,640 | ext: py | lang: Python
max_stars: services/wikisearch.py @ RezSat/nova (450057295fe40d754fb5e5ead776af21f4d1b8fc), licenses ["Apache-2.0"], count null, events null
max_issues: services/wikisearch.py @ RezSat/nova (450057295fe40d754fb5e5ead776af21f4d1b8fc), licenses ["Apache-2.0"], count null, events null
max_forks: services/wikisearch.py @ RezSat/nova (450057295fe40d754fb5e5ead776af21f4d1b8fc), licenses ["Apache-2.0"], count null, events null
content:
import pyttsx3
import speech_recognition as sr
import wolframalpha
import wikipedia
def speak(text):
engine = pyttsx3.init()
# voices = engine.getProperty('voices')
# engine.setProperty('voice', voices[1].id)
engine.setProperty('rate',155) #Speed Percent
engine.setProperty('volume', 20)#Volume
engine.say(text)
engine.runAndWait()
"""
Setting the Speech Recognizer using Google's Speech Recognition Library
"""
def get_audio():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
print("You said: "+ command + '\n')
except sr.UnknownValueError:
print("Error : i don't understand")
speak("i don't understand")
command = get_audio();
return command
command = get_audio()
query = command
try:
app_id = "A76PUG-AQXWU7XKQW"
client = wolframalpha.Client(app_id)
res = client.query(query)
answer = next(res.results).text
print(answer)
speak("Your answer is " + answer)
except:
query = query.split(' ')
query = " ".join(query[0:])
speak("I am searching for " + query)
print(wikipedia.summary(query, sentences = 3))
speak(wikipedia.summary(query, sentences = 3))
query = query.split(' ')
query = " ".join(query[0:])
speak("I am searching for " + query)
print(wikipedia.summary(query, sentences = 3))
speak(wikipedia.summary(query, sentences = 3))
avg_line_length: 25.625 | max_line_length: 73 | alphanum_fraction: 0.631098

hexsha: fdc6879e9c96db48a7f8ad12fb297e11962fdf34 | size: 760 | ext: py | lang: Python
max_stars: tests/test_sts/test_server.py @ EvaSDK/moto (8095f31772fb144f2045c0991f4c8ed17f324b91), licenses ["Apache-2.0"], count null, events null
max_issues: tests/test_sts/test_server.py @ EvaSDK/moto (8095f31772fb144f2045c0991f4c8ed17f324b91), licenses ["Apache-2.0"], count 2, events 2016-07-01T03:43:37.000Z to 2016-07-18T19:38:06.000Z
max_forks: tests/test_sts/test_server.py @ EvaSDK/moto (8095f31772fb144f2045c0991f4c8ed17f324b91), licenses ["Apache-2.0"], count null, events null
content:
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
def test_sts_get_session_token():
backend = server.create_backend_app("sts")
test_client = backend.test_client()
res = test_client.get('/?Action=GetSessionToken')
res.status_code.should.equal(200)
res.data.should.contain(b"SessionToken")
res.data.should.contain(b"AccessKeyId")
def test_sts_get_federation_token():
backend = server.create_backend_app("sts")
test_client = backend.test_client()
res = test_client.get('/?Action=GetFederationToken&Name=Bob')
res.status_code.should.equal(200)
res.data.should.contain(b"SessionToken")
res.data.should.contain(b"AccessKeyId")
avg_line_length: 26.206897 | max_line_length: 65 | alphanum_fraction: 0.742105

hexsha: 2d2825771d42b18e78652918f9c5d6c5fec9c6d7 | size: 137 | ext: py | lang: Python
max_stars: agent/watcher/__init__.py @ aaitor/agent (835ddf5037b1b6254eda57f056f54195670c17ff), licenses ["Apache-2.0"], count null, events null
max_issues: agent/watcher/__init__.py @ aaitor/agent (835ddf5037b1b6254eda57f056f54195670c17ff), licenses ["Apache-2.0"], count null, events null
max_forks: agent/watcher/__init__.py @ aaitor/agent (835ddf5037b1b6254eda57f056f54195670c17ff), licenses ["Apache-2.0"], count 1, events 2019-08-28T09:19:05.000Z to 2019-08-28T09:19:05.000Z
content:
# -*- coding: utf-8 -*-
"""Watcher package for Agent."""
__author__ = """Aitor"""
__email__ = 'aaitor@gmail.com'
__version__ = '0.1.0'
avg_line_length: 17.125 | max_line_length: 32 | alphanum_fraction: 0.613139

hexsha: 4d037d461545e1d494f137956335b7278f760574 | size: 1,772 | ext: py | lang: Python
max_stars: main.py @ kcomain/unnamed-launcher (ac1ca01f68a59790b6f47947eed4624185f964f1), licenses ["MIT"], count null, events null
max_issues: main.py @ kcomain/unnamed-launcher (ac1ca01f68a59790b6f47947eed4624185f964f1), licenses ["MIT"], count null, events null
max_forks: main.py @ kcomain/unnamed-launcher (ac1ca01f68a59790b6f47947eed4624185f964f1), licenses ["MIT"], count null, events null
content:
# Copyright (c) 2021 kcomain and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import logging
logging_mapping = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
logging.basicConfig(
format='[%(asctime)s %(levelname)s unnamed.%(module)s %(name)s thread %(thread)d] %(message)s',
level=logging_mapping.get(os.environ.get('LOGGING', 'info'), logging.INFO)
)
logging.debug(f'current log level: {logging.root.level}')
# for some reason pyside _might_ be initializing another logger causing the above code to be as useful as padding
# sorry pep8
from unnamed.main import main
if __name__ == "__main__":
main()
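A small, editor-added sketch of the level lookup performed above (hypothetical invocation such as LOGGING=debug python main.py):

import logging
import os

# the same lookup main.py performs; unknown or missing values fall back to INFO
mapping = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING,
           'error': logging.ERROR, 'critical': logging.CRITICAL}
level = mapping.get(os.environ.get('LOGGING', 'info'), logging.INFO)
print(logging.getLevelName(level))  # prints DEBUG when LOGGING=debug is set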
avg_line_length: 41.209302 | max_line_length: 113 | alphanum_fraction: 0.746614

hexsha: 3b7201de5ca2d4b474b365c0f6761fa0c175530b | size: 51,516 | ext: py | lang: Python
max_stars: bokeh/models/tools.py @ lzxyzq/bokeh (a8ca9d17fc34c49ee7ce261d9d5ff0fbd36ca391), licenses ["BSD-3-Clause"], count 1, events 2018-12-19T06:52:07.000Z to 2018-12-19T06:52:07.000Z
max_issues: bokeh/models/tools.py @ lzxyzq/bokeh (a8ca9d17fc34c49ee7ce261d9d5ff0fbd36ca391), licenses ["BSD-3-Clause"], count null, events null
max_forks: bokeh/models/tools.py @ lzxyzq/bokeh (a8ca9d17fc34c49ee7ce261d9d5ff0fbd36ca391), licenses ["BSD-3-Clause"], count null, events null
content:
''' Bokeh comes with a number of interactive tools.
There are five types of tool interactions:
.. hlist::
:columns: 5
* Pan/Drag
* Click/Tap
* Scroll/Pinch
* Actions
* Inspectors
The first three comprise the category of gesture tools, and only
one tool for each gesture can be active at any given time. The active
tool is indicated on the toolbar by a highlight next to the tool.
Actions are immediate or modal operations that are only activated when
their button in the toolbar is pressed. Inspectors are passive tools that
merely report information or annotate the plot in some way, and may
always be active regardless of what other tools are currently active.
'''
from __future__ import absolute_import
from types import FunctionType
from ..core.enums import (Anchor, Dimension, Dimensions, Location,
TooltipFieldFormatter, TooltipAttachment)
from ..core.has_props import abstract
from ..core.properties import (
Auto, Bool, Color, Date, Datetime, Dict, Either, Enum, Int, Float,
Percent, Instance, List, Seq, String, Tuple
)
from ..util.compiler import nodejs_compile, CompilationError
from ..util.dependencies import import_required
from ..util.future import get_param_info, signature
from ..core.validation import error
from ..core.validation.errors import (
INCOMPATIBLE_BOX_EDIT_RENDERER, INCOMPATIBLE_POINT_DRAW_RENDERER,
INCOMPATIBLE_POLY_DRAW_RENDERER, INCOMPATIBLE_POLY_EDIT_RENDERER,
INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER, NO_RANGE_TOOL_RANGES
)
from ..model import Model
from .annotations import BoxAnnotation, PolyAnnotation
from .callbacks import Callback
from .glyphs import XYGlyph, Rect, Patches, MultiLine
from .ranges import Range1d
from .renderers import Renderer, GlyphRenderer
from .layouts import LayoutDOM
@abstract
class Tool(Model):
''' A base class for all interactive tool types.
'''
@abstract
class Action(Tool):
''' A base class for tools that are buttons in the toolbar.
'''
pass
@abstract
class Gesture(Tool):
''' A base class for gesture tools, i.e. tools that respond to drag, scroll, or tap events.
'''
pass
@abstract
class Drag(Gesture):
''' A base class for tools that respond to drag events.
'''
pass
@abstract
class Scroll(Gesture):
''' A base class for tools that respond to scroll events.
'''
pass
@abstract
class Tap(Gesture):
''' A base class for tools that respond to tap/click events.
'''
pass
@abstract
class Inspection(Gesture):
''' A base class for tools that perform "inspections", e.g. ``HoverTool``.
'''
toggleable = Bool(True, help="""
Whether an on/off toggle button should appear in the toolbar for this
inspection tool. If ``False``, the viewers of a plot will not be able to
toggle the inspector on or off using the toolbar.
""")
@abstract
class ToolbarBase(Model):
''' A base class for different toolbars.
'''
logo = Enum("normal", "grey", help="""
What version of the Bokeh logo to display on the toolbar. If
set to None, no logo will be displayed.
""")
tools = List(Instance(Tool), help="""
A list of tools to add to the plot.
""")
class Toolbar(ToolbarBase):
''' Collect tools to display for a single plot.
'''
active_drag = Either(Auto, Instance(Drag), help="""
Specify a drag tool to be active when the plot is displayed.
""")
active_inspect = Either(Auto, Instance(Inspection), Seq(Instance(Inspection)), help="""
Specify an inspection tool or sequence of inspection tools to be active when
the plot is displayed.
""")
active_scroll = Either(Auto, Instance(Scroll), help="""
Specify a scroll/pinch tool to be active when the plot is displayed.
""")
active_tap = Either(Auto, Instance(Tap), help="""
Specify a tap/click tool to be active when the plot is displayed.
""")
active_multi = Instance((Gesture), help="""
Specify an active multi-gesture tool, for instance an edit tool or a range
tool.
Note that activating a multi-gesture tool will deactivate any other gesture
tools as appropriate. For example, if a pan tool is set as the active drag,
and this property is set to a ``BoxEditTool`` instance, the pan tool will
be deactivated (i.e. the multi-gesture tool will take precedence).
""")
class ProxyToolbar(ToolbarBase):
''' A toolbar that allows merging and proxying the tools of toolbars from multiple plots. '''
class ToolbarBox(LayoutDOM):
''' A layoutable toolbar that can accept the tools of multiple plots, and
can merge the tools into a single button for convenience.
'''
toolbar = Instance(ToolbarBase, help="""
A toolbar associated with a plot which holds all its tools.
""")
toolbar_location = Enum(Location, default="right")
class PanTool(Drag):
''' *toolbar icon*: |pan_icon|
The pan tool allows the user to pan a Plot by left-dragging
a mouse, or on touch devices by dragging a finger or stylus, across
the plot region.
The pan tool also activates the border regions of a Plot for "single
axis" panning. For instance, dragging in the vertical border or axis
will effect a pan in the vertical direction only, with the horizontal
dimension kept fixed.
.. |pan_icon| image:: /_images/icons/Pan.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the pan tool is constrained to act in. By default
the pan tool will pan in any dimension, but can be configured to only
pan horizontally across the width of the plot, or vertically across the
height of the plot.
""")
DEFAULT_RANGE_OVERLAY = lambda: BoxAnnotation(
level="overlay",
render_mode="css",
fill_color="lightgrey",
fill_alpha=0.5,
line_color="black",
line_alpha=1.0,
line_width=0.5,
line_dash=[2,2],
)
class RangeTool(Drag):
''' *toolbar icon*: |range_icon|
The range tool allows the user to update range objects for either or both
of the x- or y-dimensions by dragging a corresponding shaded annotation to
move it or change its boundaries.
A common use case is to add this tool to a plot with a large fixed range,
but to configure the tool range from a different plot. When the user
manipulates the overlay, the range of the second plot will be updated
automatically.
.. |range_icon| image:: /_images/icons/Range.png
:height: 18pt
'''
x_range = Instance(Range1d, help="""
A range synchronized to the x-dimension of the overlay. If None, the overlay
will span the entire x-dimension.
""")
x_interaction = Bool(default=True, help="""
Whether to respond to horizontal pan motions when an ``x_range`` is present.
By default, when an ``x_range`` is specified, it is possible to adjust the
horizontal position of the range box by panning horizontally inside the
box, or along the top or bottom edge of the box. To disable this, and fix
the range box in place horizontally, set to False. (The box will still
update if the ``x_range`` is updated programmatically.)
""")
y_range = Instance(Range1d, help="""
A range synchronized to the y-dimension of the overlay. If None, the overlay
will span the entire y-dimension.
""")
y_interaction = Bool(default=True, help="""
Whether to respond to vertical pan motions when a ``y_range`` is present.
By default, when a ``y_range`` is specified, it is possible to adjust the
vertical position of the range box by panning vertically inside the box, or
along the top or bottom edge of the box. To disable this, and fix the range
box in place vertically, set to False. (The box will still update if the
``y_range`` is updated programmatically.)
""")
overlay = Instance(BoxAnnotation, default=DEFAULT_RANGE_OVERLAY, help="""
A shaded annotation drawn to indicate the configured ranges.
""")
@error(NO_RANGE_TOOL_RANGES)
def _check_no_range_tool_ranges(self):
if self.x_range is None and self.y_range is None:
return "At least one of RangeTool.x_range or RangeTool.y_range must be configured"
class WheelPanTool(Scroll):
''' *toolbar icon*: |wheel_pan_icon|
The wheel pan tool allows the user to pan the plot along the configured
dimension using the scroll wheel.
.. |wheel_pan_icon| image:: /_images/icons/WheelPan.png
:height: 18pt
'''
dimension = Enum(Dimension, default="width", help="""
Which dimension the wheel pan tool is constrained to act in. By
default the wheel pan tool will pan the plot along the x-axis.
""")
class WheelZoomTool(Scroll):
''' *toolbar icon*: |wheel_zoom_icon|
The wheel zoom tool will zoom the plot in and out, centered on the
current mouse location.
The wheel zoom tool also activates the border regions of a Plot for
"single axis" zooming. For instance, zooming in the vertical border or
axis will effect a zoom in the vertical direction only, with the
horizontal dimension kept fixed.
.. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the wheel zoom tool is constrained to act in. By
default the wheel zoom tool will zoom in any dimension, but can be
configured to only zoom horizontally across the width of the plot, or
vertically across the height of the plot.
""")
maintain_focus = Bool(default=True, help="""
Whether or not the zoom tool maintains its focus position. Setting it
to False results in a more "gliding" behavior, allowing one to
zoom out more smoothly, at the cost of losing the focus position.
""")
zoom_on_axis = Bool(default=True, help="""
Whether scrolling on an axis (outside the central plot area) should
zoom that dimension.
""")
speed = Float(default=1/600, help="""
Speed at which the wheel zooms. Default is 1/600. Optimal range is between
0.001 and 0.09. High values will be clipped. Speed may vary between browsers.
""")
class SaveTool(Action):
''' *toolbar icon*: |save_icon|
The save tool is an action. When activated, the tool opens a download dialog
which allows saving an image reproduction of the plot in PNG format. If
automatic download is not supported by the web browser, the tool falls back to
opening the generated image in a new tab or window. The user can then manually
save it by right clicking on the image and choosing "Save As" (or similar)
menu item.
.. |save_icon| image:: /_images/icons/Save.png
:height: 18pt
'''
class ResetTool(Action):
''' *toolbar icon*: |reset_icon|
The reset tool is an action. When activated in the toolbar, the tool
resets the data bounds of the plot to their values when the plot was
initially created.
Optionally, the reset tool also resets the plot canvas dimensions to
their original size.
.. |reset_icon| image:: /_images/icons/Reset.png
:height: 18pt
'''
pass
class TapTool(Tap):
''' *toolbar icon*: |tap_icon|
The tap selection tool allows the user to select single points by
left-clicking a mouse, or tapping with a finger.
See :ref:`userguide_styling_selected_unselected_glyphs` for information
on styling selected and unselected glyphs.
.. |tap_icon| image:: /_images/icons/Tap.png
:height: 18pt
.. note::
Selections can be comprised of multiple regions, even those
made by different selection tools. Hold down the <<shift>> key
while making a selection to append the new selection to any
previous selection that might exist.
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset,
defaults to all renderers on a plot.
""")
behavior = Enum("select", "inspect", default="select", help="""
This tool can be configured to either make selections or inspections
on associated data sources. The difference is that selection changes
propagate across bokeh and other components (e.g. selection glyph)
will be notified. Inspections don't act like this, so it's useful to
configure `callback` when setting `behavior='inspect'`.
""")
callback = Instance(Callback, help="""
A callback to execute *whenever a glyph is "hit"* by a mouse click
or tap.
This is often useful with the :class:`~bokeh.models.callbacks.OpenURL`
model to open URLs based on a user clicking or tapping a specific glyph.
However, it may also be a :class:`~bokeh.models.callbacks.CustomJS`
which can execute arbitrary JavaScript code in response to clicking or
tapping glyphs. The callback will be executed for each individual glyph
that is it hit by a click or tap, and will receive the ``TapTool`` model
as ``cb_obj``. The optional ``cb_data`` will have the data source as
its ``.source`` attribute and the selection geometry as its
``.geometries`` attribute.
The ``.geometries`` attribute has 5 members.
``.type`` is the geometry type, which is always a ``.point`` for a tap event.
``.sx`` and ``.sy`` are the screen X and Y coordinates where the tap occurred.
``.x`` and ``.y`` are the converted data coordinates for the item that has
been selected. The ``.x`` and ``.y`` values are based on the axis assigned
to that glyph.
.. note::
This callback does *not* execute on every tap, only when a glyph is
"hit". If you would like to execute a callback on every mouse tap,
please see :ref:`userguide_interaction_jscallbacks_customjs_interactions`.
""")
class CrosshairTool(Inspection):
''' *toolbar icon*: |crosshair_icon|
The crosshair tool is a passive inspector tool. It is generally on
at all times, but can be configured in the inspector's menu
associated with the *toolbar icon* shown above.
The crosshair tool draws a crosshair annotation over the plot,
centered on the current mouse position. The crosshair tool may be
configured to draw across only one dimension by setting the
``dimension`` property to only ``width`` or ``height``.
.. |crosshair_icon| image:: /_images/icons/Crosshair.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the crosshair tool is to track. By default, both a
vertical and horizontal line will be drawn. If only "width" is supplied,
only a horizontal line will be drawn. If only "height" is supplied,
only a vertical line will be drawn.
""")
line_color = Color(default="black", help="""
A color to use to stroke paths with.
Acceptable values are:
- any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``
- an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``
- a 3-tuple of integers (r,g,b) between 0 and 255
- a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1
.. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp
""")
line_width = Float(default=1, help="""
Stroke width in units of pixels.
""")
line_alpha = Float(default=1.0, help="""
An alpha value to use to stroke paths with.
Acceptable values are floating point numbers between 0 (transparent)
and 1 (opaque).
""")
DEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(
level="overlay",
render_mode="css",
top_units="screen",
left_units="screen",
bottom_units="screen",
right_units="screen",
fill_color="lightgrey",
fill_alpha=0.5,
line_color="black",
line_alpha=1.0,
line_width=2,
line_dash=[4, 4],
)
class BoxZoomTool(Drag):
''' *toolbar icon*: |box_zoom_icon|
The box zoom tool allows users to define a rectangular
region of a Plot to zoom to by dragging the mouse or a
finger over the plot region. The end of the drag
event indicates the selection region is ready.
.. |box_zoom_icon| image:: /_images/icons/BoxZoom.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the zoom box is to be free in. By default,
users may freely draw zoom boxes with any dimensions. If only
"width" is supplied, the box will be constrained to span the entire
vertical space of the plot, only the horizontal dimension can be
controlled. If only "height" is supplied, the box will be constrained
to span the entire horizontal space of the plot, and the vertical
dimension can be controlled.
""")
overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
A shaded annotation drawn to indicate the selection region.
""")
match_aspect = Bool(default=False, help="""
Whether the box zoom region should be restricted to have the same
aspect ratio as the plot region.
.. note::
If the tool is restricted to one dimension, this value has
no effect.
""")
origin = Enum("corner", "center", default="corner", help="""
Indicates whether the rectangular zoom area should originate from a corner
(top-left or bottom-right depending on direction) or the center of the box.
""")
class ZoomInTool(Action):
''' *toolbar icon*: |zoom_in_icon|
The zoom-in tool allows users to click a button to zoom in
by a fixed amount.
.. |zoom_in_icon| image:: /_images/icons/ZoomIn.png
:height: 18pt
'''
# TODO ZoomInTool dimensions should probably be constrained to be the same as ZoomOutTool
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the zoom-in tool is constrained to act in. By
default the zoom-in tool will zoom in any dimension, but can be
configured to only zoom horizontally across the width of the plot, or
vertically across the height of the plot.
""")
factor = Percent(default=0.1, help="""
Percentage to zoom for each click of the zoom-in tool.
""")
class ZoomOutTool(Action):
''' *toolbar icon*: |zoom_out_icon|
The zoom-out tool allows users to click a button to zoom out
by a fixed amount.
.. |zoom_out_icon| image:: /_images/icons/ZoomOut.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the zoom-out tool is constrained to act in. By
default the zoom-out tool will zoom in any dimension, but can be
configured to only zoom horizontally across the width of the plot, or
vertically across the height of the plot.
""")
factor = Percent(default=0.1, help="""
Percentage to zoom for each click of the zoom-out tool.
""")
class BoxSelectTool(Drag):
''' *toolbar icon*: |box_select_icon|
The box selection tool allows users to make selections on a
Plot by indicating a rectangular region by dragging the
mouse or a finger over the plot region. The end of the drag
event indicates the selection region is ready.
See :ref:`userguide_styling_selected_unselected_glyphs` for information
on styling selected and unselected glyphs.
.. |box_select_icon| image:: /_images/icons/BoxSelect.png
:height: 18pt
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset,
defaults to all renderers on a plot.
""")
select_every_mousemove = Bool(False, help="""
Whether a selection computation should happen on every mouse
event, or only once, when the selection region is completed. Default: False
""")
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the box selection is to be free in. By default,
users may freely draw selections boxes with any dimensions. If only
"width" is supplied, the box will be constrained to span the entire
vertical space of the plot, only the horizontal dimension can be
controlled. If only "height" is supplied, the box will be constrained
to span the entire horizontal space of the plot, and the vertical
dimension can be controlled.
""")
callback = Instance(Callback, help="""
A callback to run in the browser on completion of drawing a selection box.
The cb_data parameter that is available to the Callback code will contain
one BoxSelectTool-specific field:
:geometry: object containing the coordinates of the selection box
""")
overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
A shaded annotation drawn to indicate the selection region.
""")
origin = Enum("corner", "center", default="corner", help="""
Indicates whether the rectangular selection area should originate from a corner
(top-left or bottom-right depending on direction) or the center of the box.
""")
DEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(
level="overlay",
xs_units="screen",
ys_units="screen",
fill_color="lightgrey",
fill_alpha=0.5,
line_color="black",
line_alpha=1.0,
line_width=2,
line_dash=[4, 4]
)
class LassoSelectTool(Drag):
''' *toolbar icon*: |lasso_select_icon|
The lasso selection tool allows users to make selections on a
Plot by indicating a free-drawn "lasso" region by dragging the
mouse or a finger over the plot region. The end of the drag
event indicates the selection region is ready.
See :ref:`userguide_styling_selected_unselected_glyphs` for information
on styling selected and unselected glyphs.
.. note::
Selections can be comprised of multiple regions, even those
made by different selection tools. Hold down the <<shift>> key
while making a selection to append the new selection to any
previous selection that might exist.
.. |lasso_select_icon| image:: /_images/icons/LassoSelect.png
:height: 18pt
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset,
defaults to all renderers on a plot.
""")
select_every_mousemove = Bool(True, help="""
Whether a selection computation should happen on every mouse
event, or only once, when the selection region is completed. Default: True
""")
callback = Instance(Callback, help="""
A callback to run in the browser on every selection of a lasso area.
The cb_data parameter that is available to the Callback code will contain
one LassoSelectTool-specific field:
:geometry: object containing the coordinates of the lasso area
""")
overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
A shaded annotation drawn to indicate the selection region.
""")
class PolySelectTool(Tap):
''' *toolbar icon*: |poly_select_icon|
The polygon selection tool allows users to make selections on a
Plot by indicating a polygonal region with mouse clicks. Single
clicks (or taps) add successive points to the definition of the
polygon, and a double click (or tap) indicates the selection
region is ready.
See :ref:`userguide_styling_selected_unselected_glyphs` for information
on styling selected and unselected glyphs.
.. note::
Selections can be comprised of multiple regions, even those
made by different selection tools. Hold down the <<shift>> key
while making a selection to append the new selection to any
previous selection that might exist.
.. |poly_select_icon| image:: /_images/icons/PolygonSelect.png
:height: 18pt
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset,
defaults to all renderers on a plot.
""")
callback = Instance(Callback, help="""
A callback to run in the browser on completion of drawing a polygon.
The cb_data parameter that is available to the Callback code will contain
one PolySelectTool-specific field:
:geometry: object containing the coordinates of the polygon
""")
overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
A shaded annotation drawn to indicate the selection region.
""")
class CustomJSHover(Model):
''' Define a custom formatter to apply to a hover tool field.
This model can be configured with JavaScript code to format hover tooltips.
The JavaScript code has access to the current value to format, some special
variables, and any format configured on the tooltip. The variable ``value``
will contain the untransformed value. The variable ``special_vars`` will
provide a dict with the following contents:
* ``x`` data-space x-coordinate of the mouse
* ``y`` data-space y-coordinate of the mouse
* ``sx`` screen-space x-coordinate of the mouse
* ``sy`` screen-space y-coordinate of the mouse
* ``data_x`` data-space x-coordinate of the hovered glyph
* ``data_y`` data-space y-coordinate of the hovered glyph
* ``indices`` column indices of all currently hovered glyphs
* ``name`` value of the ``name`` property of the hovered glyph renderer
If the hover is over a "multi" glyph such as ``Patches`` or ``MultiLine``
then a ``segment_index`` key will also be present.
Finally, the value of the format passed in the tooltip specification is
available as the ``format`` variable.
Example:
As an example, the following code adds a custom formatter to format
WebMercator northing coordinates (in meters) as a latitude:
.. code-block:: python
lat_custom = CustomJSHover(code="""
var projections = require("core/util/projections");
var x = special_vars.x
var y = special_vars.y
var coords = projections.wgs84_mercator.inverse([x, y])
return "" + coords[1]
""")
p.add_tools(HoverTool(
tooltips=[( 'lat','@y{custom}' )],
formatters=dict(y=lat_custom)
))
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing to Bokeh.
'''
@classmethod
def from_py_func(cls, code):
''' Create a CustomJSHover instance from a Python function. The
function is translated to JavaScript using PScript.
The Python function must have no positional arguments. It's
possible to pass Bokeh models (e.g. a ColumnDataSource) as keyword
arguments to the function.
The ``code`` function namespace will contain the variable ``value``
(the untransformed value) at render time as well as ``format`` and
``special_vars`` as described in the class description.
Args:
code (function) : a scalar function to transform a single ``value``
Returns:
CustomJSHover
'''
if not isinstance(code, FunctionType):
raise ValueError('CustomJSHover.from_py_func only accepts function objects.')
pscript = import_required('pscript',
'To use Python functions for CustomJSHover, you need PScript ' +
'("conda install -c conda-forge pscript" or "pip install pscript")')
def pscript_compile(code):
sig = signature(code)
all_names, default_values = get_param_info(sig)
if len(all_names) - len(default_values) != 0:
raise ValueError("Function may only contain keyword arguments.")
if default_values and not any([isinstance(value, Model) for value in default_values]):
raise ValueError("Default value must be a Bokeh Model.")
func_kwargs = dict(zip(all_names, default_values))
# Wrap the code attr in a function named `code` and call it
# with arguments that match the `args` attr
code = pscript.py2js(code, 'transformer') + 'return transformer(%s);\n' % ', '.join(all_names)
return code, func_kwargs
jsfunc, func_kwargs = pscript_compile(code)
return cls(code=jsfunc, args=func_kwargs)
@classmethod
def from_coffeescript(cls, code, args={}):
''' Create a CustomJSHover instance from a CoffeeScript snippet.
The function bodies are translated to JavaScript functions using
node and therefore require return statements.
The ``code`` snippet namespace will contain the variable ``value``
(the untransformed value) at render time as well as ``format`` and
``special_vars`` as described in the class description.
Example:
.. code-block:: coffeescript
formatter = CustomJSHover.from_coffeescript("return value + ' total'")
Args:
code (str) :
A coffeescript snippet to transform a single ``value`` value
Returns:
CustomJSHover
'''
compiled = nodejs_compile(code, lang="coffeescript", file="???")
if "error" in compiled:
raise CompilationError(compiled.error)
return cls(code=compiled.code, args=args)
args = Dict(String, Instance(Model), help="""
A mapping of names to Bokeh plot objects. These objects are made
available to the callback code snippet as the values of named
parameters to the callback.
""")
code = String(default="", help="""
A snippet of JavaScript code to transform a single value. The variable
``value`` will contain the untransformed value and can be expected to be
present in the function namespace at render time. Additionally, the
variable ``special_vars`` will be available, and will provide a dict
with the following contents:
* ``x`` data-space x-coordinate of the mouse
* ``y`` data-space y-coordinate of the mouse
* ``sx`` screen-space x-coordinate of the mouse
* ``sy`` screen-space y-coordinate of the mouse
* ``data_x`` data-space x-coordinate of the hovered glyph
* ``data_y`` data-space y-coordinate of the hovered glyph
* ``indices`` column indices of all currently hovered glyphs
If the hover is over a "multi" glyph such as ``Patches`` or ``MultiLine``
then a ``segment_index`` key will also be present.
Finally, the value of the format passed in the tooltip specification is
available as the ``format`` variable.
The snippet will be made into the body of a function and therefore requires
a return statement.
Example:
.. code-block:: javascript
code = '''
return value + " total"
'''
""")
class HoverTool(Inspection):
''' *toolbar icon*: |hover_icon|
The hover tool is a passive inspector tool. It is generally on at
all times, but can be configured in the inspector's menu associated
with the *toolbar icon* shown above.
By default, the hover tool displays informational tooltips whenever
the cursor is directly over a glyph. The data to show comes from the
glyph's data source, and what is to be displayed is configurable with
the ``tooltips`` attribute that maps display names to columns in the
data source, or to special known variables.
Here is an example of how to configure and use the hover tool::
# Add tooltip (name, field) pairs to the tool. See below for a
# description of possible field values.
hover.tooltips = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("radius", "@radius"),
("fill color", "$color[hex, swatch]:fill_color"),
("foo", "@foo"),
("bar", "@bar"),
("baz", "@baz{safe}"),
("total", "@total{$0,0.00}"
]
You can also supply a ``Callback`` to the HoverTool, to build custom
interactions on hover. In this case you may want to turn the tooltips
off by setting ``tooltips=None``.
.. warning::
When supplying a callback or custom template, the explicit intent
of this Bokeh Model is to embed *raw HTML and JavaScript code* for
a browser to execute. If any part of the code is derived from untrusted
user inputs, then you must take appropriate care to sanitize the user
input prior to passing to Bokeh.
Hover tool does not currently work with the following glyphs:
.. hlist::
:columns: 3
* annulus
* arc
* bezier
* image
* image_rgba
* image_url
* oval
* patch
* quadratic
* ray
* text
.. |hover_icon| image:: /_images/icons/Hover.png
:height: 18pt
'''
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used.
""")
renderers = Either(Auto, List(Instance(Renderer)), default="auto", help="""
An explicit list of renderers to hit test against. If unset,
defaults to all renderers on a plot.
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the input's value changes. The
cb_data parameter that is available to the Callback code will contain two
HoverTool specific fields:
:index: object containing the indices of the hovered points in the data source
:geometry: object containing the coordinates of the hover cursor
""")
tooltips = Either(String, List(Tuple(String, String)),
default=[
("index","$index"),
("data (x, y)","($x, $y)"),
("screen (x, y)","($sx, $sy)"),
], help="""
The (name, field) pairs describing what the hover tool should
display when there is a hit.
Field names starting with "@" are interpreted as columns on the
data source. For instance, "@temp" would look up values to display
from the "temp" column of the data source.
Field names starting with "$" are special, known fields:
:$index: index of hovered point in the data source
:$name: value of the ``name`` property of the hovered glyph renderer
:$x: x-coordinate under the cursor in data space
:$y: y-coordinate under the cursor in data space
:$sx: x-coordinate under the cursor in screen (canvas) space
:$sy: y-coordinate under the cursor in screen (canvas) space
:$color: color data from data source, with the syntax:
``$color[options]:field_name``. The available options
are: 'hex' (to display the color as a hex value), and
'swatch' to also display a small color swatch.
Field names that begin with ``@`` are associated with columns in a
``ColumnDataSource``. For instance the field name ``"@price"`` will
display values from the ``"price"`` column whenever a hover is triggered.
If the hover is for the 17th glyph, then the hover tooltip will
correspondingly display the 17th price value.
Note that if a column name contains spaces, then it must be supplied by
surrounding it in curly braces, e.g. ``@{adjusted close}`` will display
values from a column named ``"adjusted close"``.
Sometimes (especially with stacked charts) it is desirable to allow the
name of the column to be specified indirectly. The field name ``@$name`` is
distinguished in that it will look up the ``name`` field on the hovered
glyph renderer, and use that value as the column name. For instance, if
a user hovers over a glyph whose renderer is named ``"US East"``, then ``@$name`` is equivalent to
``@{US East}``.
By default, values for fields (e.g. ``@foo``) are displayed in a basic
numeric format. However it is possible to control the formatting of values
more precisely. Fields can be modified by appending a format specifier to
the end in curly braces. Some examples are below.
.. code-block:: python
"@foo{0,0.000}" # formats 10000.1234 as: 10,000.123
"@foo{(.00)}" # formats -10000.1234 as: (10000.123)
"@foo{($ 0.00 a)}" # formats 1230974 as: $ 1.23 m
Specifying a format ``{safe}`` after a field name will override automatic
escaping of the tooltip data source. Any HTML tags in the data tags will
be rendered as HTML in the resulting HoverTool output. See
:ref:`custom_hover_tooltip` for a more detailed example.
``None`` is also a valid value for tooltips. This turns off the
rendering of tooltips. This is mostly useful when supplying other
actions on hover via the callback property.
.. note::
The tooltips attribute can also be configured with a mapping type,
e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,
the visual presentation order is unspecified.
""").accepts(Dict(String, String), lambda d: list(d.items()))
formatters = Dict(String, Either(Enum(TooltipFieldFormatter), Instance(CustomJSHover)), default=lambda: dict(), help="""
Specify the formatting scheme for data source columns, e.g.
.. code-block:: python
tool.formatters = dict(date="datetime")
will cause format specifications for the "date" column to be interpreted
according to the "datetime" formatting scheme. The following schemes are
available:
:``"numeral"``:
Provides a wide variety of formats for numbers, currency, bytes, times,
and percentages. The full set of formats can be found in the
|NumeralTickFormatter| reference documentation.
:``"datetime"``:
Provides formats for date and time values. The full set of formats is
listed in the |DatetimeTickFormatter| reference documentation.
:``"printf"``:
Provides formats similar to C-style "printf" type specifiers. See the
|PrintfTickFormatter| reference documentation for complete details.
If no formatter is specified for a column name, the default ``"numeral"``
formatter is assumed.
.. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`
.. |DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`
.. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`
""")
mode = Enum("mouse", "hline", "vline", help="""
Whether to consider the hover pointer as a point (x/y values), or as a
span in the horizontal or vertical direction.
""")
point_policy = Enum("snap_to_data", "follow_mouse", "none", help="""
Whether the tooltip position should snap to the "center" (or other anchor)
position of the associated glyph, or always follow the current mouse cursor
position.
""")
line_policy = Enum("prev", "next", "nearest", "interp", "none",
default="nearest", help="""
When showing tooltips for lines, designates whether the tooltip position
should be the "previous" or "next" points on the line, the "nearest" point
to the current mouse position, or "interpolate" along the line to the
current mouse position.
""")
anchor = Enum(Anchor, default="center", help="""
If point policy is set to `"snap_to_data"`, `anchor` defines the attachment
point of a tooltip. The default is to attach to the center of a glyph.
""")
attachment = Enum(TooltipAttachment, help="""
Whether the tooltip should be displayed to the left or right of the cursor
position or above or below it, or if it should be automatically placed
in the horizontal or vertical dimension.
""")
show_arrow = Bool(default=True, help="""
Whether the tooltip's arrow should be shown.
""")
DEFAULT_HELP_TIP = "Click the question mark to learn more about Bokeh plot tools."
DEFAULT_HELP_URL = "https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#built-in-tools"
class HelpTool(Action):
''' A button tool to provide a "help" link to users.
The hover text can be customized through the ``help_tooltip`` attribute
and the redirect site overridden as well.
'''
help_tooltip = String(default=DEFAULT_HELP_TIP, help="""
Tooltip displayed when hovering over the help icon.
""")
redirect = String(default=DEFAULT_HELP_URL, help="""
Site to redirect to upon click.
""")
class UndoTool(Action):
''' *toolbar icon*: |undo_icon|
The undo tool allows restoring the previous state of the plot.
.. |undo_icon| image:: /_images/icons/Undo.png
:height: 18pt
'''
class RedoTool(Action):
''' *toolbar icon*: |redo_icon|
The redo tool reverses the last action performed by the undo tool.
.. |redo_icon| image:: /_images/icons/Redo.png
:height: 18pt
'''
@abstract
class EditTool(Gesture):
''' A base class for all interactive draw tool types.
'''
empty_value = Either(Bool, Int, Float, Date, Datetime, Color, help="""
Defines the value to insert on non-coordinate columns when a new
glyph is inserted into the ColumnDataSource columns, e.g. when a
circle glyph defines 'x', 'y' and 'color' columns, adding a new
point will add the x and y-coordinates to 'x' and 'y' columns and
the color column will be filled with the defined empty value.
""")
renderers = List(Instance(Renderer), help="""
An explicit list of renderers corresponding to scatter glyphs
that may be edited.
""")
class BoxEditTool(EditTool, Drag, Tap):
''' *toolbar icon*: |box_edit_icon|
The BoxEditTool allows drawing, dragging and deleting ``Rect``
glyphs on one or more renderers by editing the underlying
``ColumnDataSource`` data. Like other drawing tools, the renderers
that are to be edited must be supplied explicitly as a list. When
drawing a new box the data will always be added to the
``ColumnDataSource`` on the first supplied renderer.
The tool will automatically modify the columns on the data source
corresponding to the ``x``, ``y``, ``width`` and ``height`` values
of the glyph. Any additional columns in the data source will be
padded with the declared ``empty_value``, when adding a new box.
The supported actions include:
* Add box: Hold shift then click and drag anywhere on the plot or
double tap once to start drawing, move the mouse and double tap
again to finish drawing.
* Move box: Click and drag an existing box, the box will be
dropped once you let go of the mouse button.
* Delete box: Tap a box to select it then press <<backspace>> key
while the mouse is within the plot area.
To **Move** or **Delete** multiple boxes at once:
* Move selection: Select box(es) with <<shift>>+tap (or another
selection tool) then drag anywhere on the plot. Selecting and
then dragging on a specific box will move both.
* Delete selection: Select box(es) with <<shift>>+tap (or another
selection tool) then press <<backspace>> while the mouse is
within the plot area.
.. |box_edit_icon| image:: /_images/icons/BoxEdit.png
:height: 18pt
'''
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the box drawing is to be free in. By default,
users may freely draw boxes with any dimensions. If only "width"
is supplied, the box will be constrained to span the entire
vertical space of the plot, and only the horizontal dimension can be
controlled. If only "height" is supplied, the box will be
constrained to span the entire horizontal space of the plot, and
the vertical dimension can be controlled.
""")
@error(INCOMPATIBLE_BOX_EDIT_RENDERER)
def _check_compatible_renderers(self):
incompatible_renderers = []
for renderer in self.renderers:
if not isinstance(renderer.glyph, Rect):
incompatible_renderers.append(renderer)
if incompatible_renderers:
glyph_types = ', '.join([type(renderer.glyph).__name__ for renderer in incompatible_renderers])
return "%s glyph type(s) found." % glyph_types
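# A minimal usage sketch (hypothetical, not part of this module), assuming a
# Bokeh figure ``p``; only ``Rect`` glyph renderers are compatible with this tool:
#
#     from bokeh.models import BoxEditTool
#     r = p.rect(x=[1], y=[1], width=0.5, height=0.5)
#     p.add_tools(BoxEditTool(renderers=[r]))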
class PointDrawTool(EditTool, Drag, Tap):
''' *toolbar icon*: |point_draw_icon|
The PointDrawTool allows adding, dragging and deleting point-like
glyphs (of ``XYGlyph`` type) on one or more renderers by editing the
underlying ``ColumnDataSource`` data. Like other drawing tools, the
renderers that are to be edited must be supplied explicitly as a list.
Any newly added points will be inserted on the ``ColumnDataSource`` of
the first supplied renderer.
The tool will automatically modify the columns on the data source
corresponding to the ``x`` and ``y`` values of the glyph. Any additional
columns in the data source will be padded with the given ``empty_value``
when adding a new point.
.. note::
The data source updates will trigger data change events continuously
throughout the edit operations on the BokehJS side. In Bokeh server
apps, the data source will only be synced once, when the edit operation
finishes.
The supported actions include:
* Add point: Tap anywhere on the plot
* Move point: Tap and drag an existing point, the point will be
dropped once you let go of the mouse button.
* Delete point: Tap a point to select it then press <<backspace>>
key while the mouse is within the plot area.
.. |point_draw_icon| image:: /_images/icons/PointDraw.png
:height: 18pt
'''
add = Bool(default=True, help="""
Enables adding of new points on tap events.""")
drag = Bool(default=True, help="""
Enables dragging of existing points on pan events.""")
@error(INCOMPATIBLE_POINT_DRAW_RENDERER)
def _check_compatible_renderers(self):
incompatible_renderers = []
for renderer in self.renderers:
if not isinstance(renderer.glyph, XYGlyph):
incompatible_renderers.append(renderer)
if incompatible_renderers:
glyph_types = ', '.join([type(renderer.glyph).__name__ for renderer in incompatible_renderers])
return "%s glyph type(s) found." % glyph_types
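# A minimal usage sketch (hypothetical, not part of this module), assuming a
# Bokeh figure ``p``; the renderers must use point-like (``XYGlyph``) glyphs
# such as circles:
#
#     from bokeh.models import PointDrawTool
#     c = p.circle([], [], size=10)
#     p.add_tools(PointDrawTool(renderers=[c], empty_value="black"))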
class PolyDrawTool(EditTool, Drag, Tap):
''' *toolbar icon*: |poly_draw_icon|
The PolyDrawTool allows drawing, selecting and deleting
``Patches`` and ``MultiLine`` glyphs on one or more renderers by
editing the underlying ColumnDataSource data. Like other drawing
tools, the renderers that are to be edited must be supplied
explicitly as a list.
The tool will automatically modify the columns on the data source
corresponding to the ``xs`` and ``ys`` values of the glyph. Any
additional columns in the data source will be padded with the
declared ``empty_value``, when adding a new point.
The supported actions include:
* Add patch/multi-line: Double tap to add the first vertex, then
use tap to add each subsequent vertex, to finalize the draw
action, double tap to insert the final vertex or press the <<esc>>
key.
* Move patch/multi-line: Tap and drag an existing
patch/multi-line, the point will be dropped once you let go of
the mouse button.
* Delete patch/multi-line: Tap a patch/multi-line to select it
then press <<backspace>> key while the mouse is within the plot
area.
.. |poly_draw_icon| image:: /_images/icons/PolyDraw.png
:height: 18pt
'''
drag = Bool(default=True, help="""
Enables dragging of existing patches and multi-lines on pan events.""")
@error(INCOMPATIBLE_POLY_DRAW_RENDERER)
def _check_compatible_renderers(self):
incompatible_renderers = []
for renderer in self.renderers:
if not isinstance(renderer.glyph, (MultiLine, Patches)):
incompatible_renderers.append(renderer)
if incompatible_renderers:
glyph_types = ', '.join([type(renderer.glyph).__name__ for renderer in incompatible_renderers])
return "%s glyph type(s) found." % glyph_types
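# A minimal usage sketch (hypothetical, not part of this module), assuming a
# Bokeh figure ``p``:
#
#     from bokeh.models import PolyDrawTool
#     r1 = p.patches([], [])
#     r2 = p.multi_line([], [])
#     p.add_tools(PolyDrawTool(renderers=[r1, r2]))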
class PolyEditTool(EditTool, Drag, Tap):
''' *toolbar icon*: |poly_edit_icon|
The PolyEditTool allows editing the vertices of one or more
``Patches`` or ``MultiLine`` glyphs. The glyphs to be edited can
be defined via the ``renderers`` property and the renderer for the
vertices can be defined via the ``vertex_renderer``, which must
render a point-like Glyph (of ``XYGlyph`` type).
The tool will automatically modify the columns on the data source
corresponding to the ``xs`` and ``ys`` values of the glyph. Any
additional columns in the data source will be padded with the
declared ``empty_value``, when adding a new point.
The supported actions include:
* Show vertices: Double tap an existing patch or multi-line
* Add vertex: Double tap an existing vertex to select it, the tool
will draw the next point, to add it tap in a new location. To
finish editing and add a point, double tap; otherwise press the
<<esc>> key to cancel.
* Move vertex: Drag an existing vertex and let go of the mouse
button to release it.
* Delete vertex: After selecting one or more vertices press
<<backspace>> while the mouse cursor is within the plot area.
.. |poly_edit_icon| image:: /_images/icons/PolyEdit.png
:height: 18pt
'''
vertex_renderer = Instance(GlyphRenderer, help="""
The renderer used to render the vertices of a selected line or
polygon.""")
@error(INCOMPATIBLE_POLY_EDIT_VERTEX_RENDERER)
def _check_compatible_vertex_renderer(self):
glyph = self.vertex_renderer.glyph
if not isinstance(glyph, XYGlyph):
return "glyph type %s found." % type(glyph).__name__
@error(INCOMPATIBLE_POLY_EDIT_RENDERER)
def _check_compatible_renderers(self):
incompatible_renderers = []
for renderer in self.renderers:
if not isinstance(renderer.glyph, (MultiLine, Patches)):
incompatible_renderers.append(renderer)
if incompatible_renderers:
glyph_types = ', '.join([type(renderer.glyph).__name__
for renderer in incompatible_renderers])
return "%s glyph type(s) found." % glyph_types
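# A minimal usage sketch (hypothetical, not part of this module), assuming a
# Bokeh figure ``p``; the vertex renderer must use a point-like (``XYGlyph``) glyph:
#
#     from bokeh.models import PolyEditTool
#     r1 = p.patches([], [])
#     c1 = p.circle([], [], size=10)
#     p.add_tools(PolyEditTool(renderers=[r1], vertex_renderer=c1))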
| 37.222543
| 124
| 0.679925
|
c53e359a8e828dcd496ab1b8e52097596e1b2177
| 6,239
|
py
|
Python
|
face_sdk/models/network_def/mobilev3_pfld.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 1,329
|
2021-01-13T07:06:30.000Z
|
2022-03-31T07:23:39.000Z
|
face_sdk/models/network_def/mobilev3_pfld.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 115
|
2021-01-13T10:42:57.000Z
|
2022-03-28T03:57:52.000Z
|
face_sdk/models/network_def/mobilev3_pfld.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 351
|
2021-01-13T07:21:00.000Z
|
2022-03-29T14:11:39.000Z
|
# derive from:
# https://github.com/Hsintao/pfld_106_face_landmarks/blob/master/models/mobilev3_pfld.py
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv_bn(inp, oup, kernel_size, stride, padding=1, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
return nn.Sequential(
conv_layer(inp, oup, kernel_size, stride, padding, bias=False),
norm_layer(oup),
nlin_layer(inplace=True)
)
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
return nn.Sequential(
conv_layer(inp, oup, 1, 1, 0, bias=False),
norm_layer(oup),
nlin_layer(inplace=True)
)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3., inplace=self.inplace) / 6.
class SEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
Hsigmoid()
)
def forward(self, x):
b, c, h, w = x.size()
# F.avg_pool2d()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class Identity(nn.Module):
def __init__(self, channel):
super(Identity, self).__init__()
def forward(self, x):
return x
class MobileBottleneck(nn.Module):
def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
super(MobileBottleneck, self).__init__()
assert stride in [1, 2]
assert kernel in [3, 5]
padding = (kernel - 1) // 2
self.use_res_connect = stride == 1 and inp == oup
conv_layer = nn.Conv2d
norm_layer = nn.BatchNorm2d
if nl == 'RE':
nlin_layer = nn.ReLU # or ReLU6
elif nl == 'HS':
nlin_layer = Hswish
else:
raise NotImplementedError
if se:
SELayer = SEModule
else:
SELayer = Identity
self.conv = nn.Sequential(
# pw
conv_layer(inp, exp, 1, 1, 0, bias=False),
norm_layer(exp),
nlin_layer(inplace=True),
# dw
conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
norm_layer(exp),
SELayer(exp),
nlin_layer(inplace=True),
# pw-linear
conv_layer(exp, oup, 1, 1, 0, bias=False),
norm_layer(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class PFLDInference(nn.Module):
def __init__(self):
super(PFLDInference, self).__init__()
self.use_attention = True
self.conv_bn1 = conv_bn(3, 16, 3, stride=1, nlin_layer=Hswish)
self.conv_bn2 = MobileBottleneck(16, 16, 3, 1, 16, False, 'RE')
self.conv3_1 = MobileBottleneck(16, 24, 3, 2, 64, False, 'RE')
self.block3_2 = MobileBottleneck(24, 24, 3, 1, 72, False, "RE")
self.block3_3 = MobileBottleneck(24, 40, 5, 2, 72, self.use_attention, "RE")
self.block3_4 = MobileBottleneck(40, 40, 5, 1, 120, self.use_attention, "RE")
self.block3_5 = MobileBottleneck(40, 40, 5, 1, 120, self.use_attention, "RE")
self.conv4_1 = MobileBottleneck(40, 80, 3, 2, 240, False, "RE")
self.conv5_1 = MobileBottleneck(80, 80, 3, 1, 200, False, "HS")
self.block5_2 = MobileBottleneck(80, 112, 3, 1, 480, self.use_attention, "HS")
self.block5_3 = MobileBottleneck(112, 112, 3, 1, 672, self.use_attention, "HS")
self.block5_4 = MobileBottleneck(112, 160, 3, 1, 672, self.use_attention, "HS")
self.conv6_1 = MobileBottleneck(160, 16, 3, 1, 320, False, "HS") # [16, 14, 14]
self.conv7 = nn.Conv2d(16, 32, 3, 2, padding=1)
self.conv8 = nn.Conv2d(32, 128, 7, 1, 0)
self.avg_pool1 = nn.AvgPool2d(14)
self.avg_pool2 = nn.AvgPool2d(7)
self.fc = nn.Linear(176, 106 * 2)
def forward(self, x): # x: 3, 112, 112
x = self.conv_bn1(x)  # [16, 112, 112]
x = self.conv_bn2(x)  # [16, 112, 112]
x = self.conv3_1(x)
x = self.block3_2(x)
x = self.block3_3(x)
x = self.block3_4(x)
out1 = self.block3_5(x)
x = self.conv4_1(out1)
x = self.conv5_1(x)
x = self.block5_2(x)
x = self.block5_3(x)
x = self.block5_4(x)
x = self.conv6_1(x)
x1 = self.avg_pool1(x)
x1 = x1.view(x1.size(0), -1)
x = self.conv7(x)
x2 = self.avg_pool2(x)
x2 = x2.view(x2.size(0), -1)
x3 = self.conv8(x)
x3 = x3.view(x1.size(0), -1)
multi_scale = torch.cat([x1, x2, x3], 1)
landmarks = self.fc(multi_scale)
return out1, landmarks
class AuxiliaryNet(nn.Module):
def __init__(self):
super(AuxiliaryNet, self).__init__()
self.conv1 = conv_bn(40, 128, 3, 2)
self.conv2 = conv_bn(128, 128, 3, 1)
self.conv3 = conv_bn(128, 32, 3, 2)
self.conv4 = conv_bn(32, 128, 3, 1, padding=0)
self.max_pool1 = nn.MaxPool2d(5)
self.fc1 = nn.Linear(128, 32)
self.fc2 = nn.Linear(32, 3)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.max_pool1(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
return x
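# A minimal forward-pass sketch (hypothetical, not part of the original file),
# assuming a single 112x112 RGB crop as input:
#
#     net = PFLDInference()
#     aux = AuxiliaryNet()
#     img = torch.randn(1, 3, 112, 112)
#     feat, landmarks = net(img)   # feat: [1, 40, 28, 28], landmarks: [1, 212] (106 points x 2)
#     angles = aux(feat)           # [1, 3], the auxiliary pose head used during training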
| 31.831633
| 124
| 0.550088
|
2530264fb2f22e1ca3872d204d157655cf3f37e5
| 227
|
py
|
Python
|
ehmatthes-pcc_2e-078318e/chapter_08/pets.py
|
charliechocho/py-crash-course
|
b42b8a4c1cb8d76e8316f55c1565ff42d920ee63
|
[
"Apache-2.0"
] | 12
|
2020-10-22T14:03:27.000Z
|
2022-03-28T08:14:22.000Z
|
ehmatthes-pcc_2e-078318e/chapter_08/pets.py
|
charliechocho/py-crash-course
|
b42b8a4c1cb8d76e8316f55c1565ff42d920ee63
|
[
"Apache-2.0"
] | 4
|
2020-12-26T15:08:02.000Z
|
2021-05-16T13:19:33.000Z
|
ehmatthes-pcc_2e-078318e/chapter_08/pets.py
|
charliechocho/py-crash-course
|
b42b8a4c1cb8d76e8316f55c1565ff42d920ee63
|
[
"Apache-2.0"
] | 9
|
2020-12-22T10:22:12.000Z
|
2022-03-28T08:14:53.000Z
|
def describe_pet(pet_name, animal_type='dog'):
"""Display information about a pet."""
print(f"\nI have a {animal_type}.")
print(f"My {animal_type}'s name is {pet_name.title()}.")
describe_pet(pet_name='willie')
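# A hypothetical extra call (not in the original file) that overrides the
# default animal_type:
# describe_pet(pet_name='harry', animal_type='hamster')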
| 37.833333
| 60
| 0.674009
|
0253daf80e74b701cecb86f7bca58da8662c9342
| 16,026
|
py
|
Python
|
aiohttp/test_utils.py
|
mosquito/aiohttp
|
cade625895a0a098981d8f1b52f78e49b4aae30e
|
[
"Apache-2.0"
] | null | null | null |
aiohttp/test_utils.py
|
mosquito/aiohttp
|
cade625895a0a098981d8f1b52f78e49b4aae30e
|
[
"Apache-2.0"
] | null | null | null |
aiohttp/test_utils.py
|
mosquito/aiohttp
|
cade625895a0a098981d8f1b52f78e49b4aae30e
|
[
"Apache-2.0"
] | null | null | null |
"""Utilities shared by tests."""
import asyncio
import contextlib
import functools
import gc
import socket
import sys
import unittest
from abc import ABC, abstractmethod
from unittest import mock
from multidict import CIMultiDict
from yarl import URL
import aiohttp
from aiohttp.client import _RequestContextManager, _WSRequestContextManager
from . import ClientSession, hdrs
from .helpers import sentinel
from .http import HttpVersion, RawRequestMessage
from .signals import Signal
from .web import (AppRunner, Request, Server, ServerRunner, TCPSite,
UrlMappingMatchInfo)
def unused_port():
"""Return a port that is unused on the current host."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 0))
return s.getsockname()[1]
class BaseTestServer(ABC):
def __init__(self, *, scheme=sentinel, loop=None,
host='127.0.0.1', port=None, skip_url_asserts=False,
**kwargs):
self._loop = loop
self.runner = None
self._root = None
self.host = host
self.port = port
self._closed = False
self.scheme = scheme
self.skip_url_asserts = skip_url_asserts
async def start_server(self, loop=None, **kwargs):
if self.runner:
return
self._loop = loop
self._ssl = kwargs.pop('ssl', None)
self.runner = await self._make_runner(**kwargs)
await self.runner.setup()
if not self.port:
self.port = unused_port()
site = TCPSite(self.runner, host=self.host, port=self.port,
ssl_context=self._ssl)
await site.start()
if self.scheme is sentinel:
if self._ssl:
scheme = 'https'
else:
scheme = 'http'
self.scheme = scheme
self._root = URL('{}://{}:{}'.format(self.scheme,
self.host,
self.port))
@abstractmethod # pragma: no cover
async def _make_runner(self, **kwargs):
pass
def make_url(self, path):
url = URL(path)
if not self.skip_url_asserts:
assert not url.is_absolute()
return self._root.join(url)
else:
return URL(str(self._root) + path)
@property
def started(self):
return self.runner is not None
@property
def closed(self):
return self._closed
@property
def handler(self):
# for backward compatibility
# web.Server instance
return self.runner.server
async def close(self):
"""Close all fixtures created by the test server.
After that point, the TestServer is no longer usable.
This is an idempotent function: running close multiple times
will not have any additional effects.
close is also run when the object is garbage collected, and on
exit when used as a context manager.
"""
if self.started and not self.closed:
await self.runner.cleanup()
self._root = None
self.port = None
self._closed = True
def __enter__(self):
raise TypeError("Use async with instead")
def __exit__(self, exc_type, exc_value, traceback):
# __exit__ should exist in pair with __enter__ but never executed
pass # pragma: no cover
async def __aenter__(self):
await self.start_server(loop=self._loop)
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
class TestServer(BaseTestServer):
def __init__(self, app, *,
scheme=sentinel, host='127.0.0.1', port=None, **kwargs):
self.app = app
super().__init__(scheme=scheme, host=host, port=port, **kwargs)
async def _make_runner(self, **kwargs):
return AppRunner(self.app, **kwargs)
class RawTestServer(BaseTestServer):
def __init__(self, handler, *,
scheme=sentinel, host='127.0.0.1', port=None, **kwargs):
self._handler = handler
super().__init__(scheme=scheme, host=host, port=port, **kwargs)
async def _make_runner(self, debug=True, **kwargs):
srv = Server(
self._handler, loop=self._loop, debug=True, **kwargs)
return ServerRunner(srv, debug=debug, **kwargs)
class TestClient:
"""
A test client implementation.
To write functional tests for aiohttp based servers.
"""
def __init__(self, server, *, cookie_jar=None, loop=None, **kwargs):
if not isinstance(server, BaseTestServer):
raise TypeError("server must be a TestServer "
"instance, found type: %r" % type(server))
self._server = server
self._loop = loop
if cookie_jar is None:
cookie_jar = aiohttp.CookieJar(unsafe=True, loop=loop)
self._session = ClientSession(loop=loop,
cookie_jar=cookie_jar,
**kwargs)
self._closed = False
self._responses = []
self._websockets = []
async def start_server(self):
await self._server.start_server(loop=self._loop)
@property
def host(self):
return self._server.host
@property
def port(self):
return self._server.port
@property
def server(self):
return self._server
@property
def session(self):
"""An internal aiohttp.ClientSession.
Unlike the methods on the TestClient, client session requests
do not automatically include the host in the url queried, and
will require an absolute path to the resource.
"""
return self._session
def make_url(self, path):
return self._server.make_url(path)
async def request(self, method, path, *args, **kwargs):
"""Routes a request to tested http server.
The interface is identical to aiohttp.ClientSession.request,
except the loop kwarg is overridden by the instance used by the
test server.
"""
resp = await self._session.request(
method, self.make_url(path), *args, **kwargs
)
# save it to close later
self._responses.append(resp)
return resp
def get(self, path, *args, **kwargs):
"""Perform an HTTP GET request."""
return _RequestContextManager(
self.request(hdrs.METH_GET, path, *args, **kwargs)
)
def post(self, path, *args, **kwargs):
"""Perform an HTTP POST request."""
return _RequestContextManager(
self.request(hdrs.METH_POST, path, *args, **kwargs)
)
def options(self, path, *args, **kwargs):
"""Perform an HTTP OPTIONS request."""
return _RequestContextManager(
self.request(hdrs.METH_OPTIONS, path, *args, **kwargs)
)
def head(self, path, *args, **kwargs):
"""Perform an HTTP HEAD request."""
return _RequestContextManager(
self.request(hdrs.METH_HEAD, path, *args, **kwargs)
)
def put(self, path, *args, **kwargs):
"""Perform an HTTP PUT request."""
return _RequestContextManager(
self.request(hdrs.METH_PUT, path, *args, **kwargs)
)
def patch(self, path, *args, **kwargs):
"""Perform an HTTP PATCH request."""
return _RequestContextManager(
self.request(hdrs.METH_PATCH, path, *args, **kwargs)
)
def delete(self, path, *args, **kwargs):
"""Perform an HTTP DELETE request."""
return _RequestContextManager(
self.request(hdrs.METH_DELETE, path, *args, **kwargs)
)
def ws_connect(self, path, *args, **kwargs):
"""Initiate websocket connection.
The api corresponds to aiohttp.ClientSession.ws_connect.
"""
return _WSRequestContextManager(
self._ws_connect(path, *args, **kwargs)
)
async def _ws_connect(self, path, *args, **kwargs):
ws = await self._session.ws_connect(
self.make_url(path), *args, **kwargs)
self._websockets.append(ws)
return ws
async def close(self):
"""Close all fixtures created by the test client.
After that point, the TestClient is no longer usable.
This is an idempotent function: running close multiple times
will not have any additional effects.
close is also run on exit when used as a(n) (asynchronous)
context manager.
"""
if not self._closed:
for resp in self._responses:
resp.close()
for ws in self._websockets:
await ws.close()
await self._session.close()
await self._server.close()
self._closed = True
def __enter__(self):
raise TypeError("Use async with instead")
def __exit__(self, exc_type, exc_value, traceback):
# __exit__ should exist in pair with __enter__ but never executed
pass # pragma: no cover
async def __aenter__(self):
await self.start_server()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
class AioHTTPTestCase(unittest.TestCase):
"""A base class to allow for unittest web applications using
aiohttp.
Provides the following:
* self.client (aiohttp.test_utils.TestClient): an aiohttp test client.
* self.loop (asyncio.BaseEventLoop): the event loop in which the
application and server are running.
* self.app (aiohttp.web.Application): the application returned by
self.get_application()
Note that the TestClient's methods are asynchronous: you have to
execute functions on the test client using asynchronous methods.
"""
async def get_application(self):
"""
This method should be overridden
to return the aiohttp.web.Application
object to test.
"""
return self.get_app()
def get_app(self):
"""Obsolete method used to construct the web application.
Use .get_application() coroutine instead
"""
raise RuntimeError("Did you forget to define get_application()?")
def setUp(self):
self.loop = setup_test_loop()
self.app = self.loop.run_until_complete(self.get_application())
self.server = self.loop.run_until_complete(self.get_server(self.app))
self.client = self.loop.run_until_complete(
self.get_client(self.server))
self.loop.run_until_complete(self.client.start_server())
self.loop.run_until_complete(self.setUpAsync())
async def setUpAsync(self):
pass
def tearDown(self):
self.loop.run_until_complete(self.tearDownAsync())
self.loop.run_until_complete(self.client.close())
teardown_test_loop(self.loop)
async def tearDownAsync(self):
pass
async def get_server(self, app):
"""Return a TestServer instance."""
return TestServer(app, loop=self.loop)
async def get_client(self, server):
"""Return a TestClient instance."""
return TestClient(server, loop=self.loop)
def unittest_run_loop(func, *args, **kwargs):
"""A decorator for use with asynchronous methods of an
AioHTTPTestCase.
Handles executing an asynchronous function, using
the self.loop of the AioHTTPTestCase.
"""
@functools.wraps(func, *args, **kwargs)
def new_func(self, *inner_args, **inner_kwargs):
return self.loop.run_until_complete(
func(self, *inner_args, **inner_kwargs))
return new_func
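# A minimal usage sketch (hypothetical, assuming an aiohttp.web application
# with a ``hello`` handler); note that calls on ``self.client`` must be awaited:
#
#     from aiohttp import web
#
#     class MyAppTestCase(AioHTTPTestCase):
#         async def get_application(self):
#             app = web.Application()
#             app.router.add_get('/', hello)
#             return app
#
#         @unittest_run_loop
#         async def test_example(self):
#             resp = await self.client.request("GET", "/")
#             assert resp.status == 200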
@contextlib.contextmanager
def loop_context(loop_factory=asyncio.new_event_loop, fast=False):
"""A contextmanager that creates an event_loop, for test purposes.
Handles the creation and cleanup of a test loop.
"""
loop = setup_test_loop(loop_factory)
yield loop
teardown_test_loop(loop, fast=fast)
def setup_test_loop(loop_factory=asyncio.new_event_loop):
"""Create and return an asyncio.BaseEventLoop
instance.
The caller should also call teardown_test_loop,
once they are done with the loop.
"""
loop = loop_factory()
asyncio.set_event_loop(None)
if sys.platform != "win32":
policy = asyncio.get_event_loop_policy()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(loop)
with contextlib.suppress(NotImplementedError):
policy.set_child_watcher(watcher)
return loop
def teardown_test_loop(loop, fast=False):
"""Teardown and cleanup an event_loop created
by setup_test_loop.
"""
closed = loop.is_closed()
if not closed:
loop.call_soon(loop.stop)
loop.run_forever()
loop.close()
if not fast:
gc.collect()
asyncio.set_event_loop(None)
def _create_app_mock():
app = mock.Mock()
app._debug = False
app.on_response_prepare = Signal(app)
app.on_response_prepare.freeze()
return app
def _create_transport(sslcontext=None):
transport = mock.Mock()
def get_extra_info(key):
if key == 'sslcontext':
return sslcontext
else:
return None
transport.get_extra_info.side_effect = get_extra_info
return transport
def make_mocked_request(method, path, headers=None, *,
match_info=sentinel,
version=HttpVersion(1, 1), closing=False,
app=None,
writer=sentinel,
protocol=sentinel,
transport=sentinel,
payload=sentinel,
sslcontext=None,
client_max_size=1024**2,
loop=...):
"""Creates a mocked web.Request for testing purposes.
Useful in unit tests, when spinning up a full web server is overkill or
specific conditions and errors are hard to trigger.
"""
task = mock.Mock()
if loop is ...:
loop = mock.Mock()
loop.create_future.return_value = ()
if version < HttpVersion(1, 1):
closing = True
if headers:
headers = CIMultiDict(headers)
raw_hdrs = tuple(
(k.encode('utf-8'), v.encode('utf-8')) for k, v in headers.items())
else:
headers = CIMultiDict()
raw_hdrs = ()
chunked = 'chunked' in headers.get(hdrs.TRANSFER_ENCODING, '').lower()
message = RawRequestMessage(
method, path, version, headers,
raw_hdrs, closing, False, False, chunked, URL(path))
if app is None:
app = _create_app_mock()
if protocol is sentinel:
protocol = mock.Mock()
if transport is sentinel:
transport = _create_transport(sslcontext)
if writer is sentinel:
writer = mock.Mock()
writer.write_headers = make_mocked_coro(None)
writer.write = make_mocked_coro(None)
writer.write_eof = make_mocked_coro(None)
writer.drain = make_mocked_coro(None)
writer.transport = transport
protocol.transport = transport
protocol.writer = writer
if payload is sentinel:
payload = mock.Mock()
req = Request(message, payload,
protocol, writer, task, loop,
client_max_size=client_max_size)
match_info = UrlMappingMatchInfo(
{} if match_info is sentinel else match_info, mock.Mock())
match_info.add_app(app)
req._match_info = match_info
return req
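# A short usage sketch (hypothetical):
#
#     req = make_mocked_request('GET', '/', headers={'token': 'x'})
#     assert req.method == 'GET'
#     assert req.headers['token'] == 'x'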
def make_mocked_coro(return_value=sentinel, raise_exception=sentinel):
"""Creates a coroutine mock."""
@asyncio.coroutine
def mock_coro(*args, **kwargs):
if raise_exception is not sentinel:
raise raise_exception
return return_value
return mock.Mock(wraps=mock_coro)
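# A short usage sketch (hypothetical): stub out an awaitable API with a canned
# result, assuming an event loop ``loop`` is available:
#
#     session = mock.Mock()
#     session.close = make_mocked_coro(None)
#     loop.run_until_complete(session.close())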
| 29.459559
| 79
| 0.618682
|
51f23ab1f5359edd477fed3073588d3b82035e33
| 13,214
|
py
|
Python
|
src/compas_rhino/install.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas_rhino/install.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas_rhino/install.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
import os
import sys
import compas_rhino
import compas._os
import compas.plugins
__all__ = [
'install',
'installable_rhino_packages',
'after_rhino_install'
]
INSTALLED_VERSION = None
def install(version=None, packages=None, clean=False):
"""Install COMPAS for Rhino.
Parameters
----------
version : {'5.0', '6.0', '7.0', '8.0'}, optional
The version number of Rhino.
Default is ``'7.0'``.
packages : list of str, optional
List of packages to install or None to use default package list.
Default is the result of ``installable_rhino_packages``,
which collects all installable packages in the current environment.
clean : bool, optional
If True, this will clean up the entire scripts folder and also remove
existing symlinks that are not importable in the current environment.
Examples
--------
.. code-block:: python
import compas_rhino.install
compas_rhino.install.install()
.. code-block:: bash
python -m compas_rhino.install
"""
version = compas_rhino._check_rhino_version(version)
# We install COMPAS packages in the scripts folder
# instead of directly as IPy module.
scripts_path = compas_rhino._get_rhino_scripts_path(version)
# This is for old installs
ipylib_path = compas_rhino._get_rhino_ironpython_lib_path(version)
# Filter the provided list of packages
# If no packages are provided
# this first collects all installable packages from the environment.
packages = _filter_installable_packages(version, packages)
results = []
symlinks_to_install = []
symlinks_to_uninstall = []
exit_code = 0
# check all installable packages
# add the packages that can't be imported from the current env to the list of symlinks to uninstall
# and remove the package name from the list of installable packages
# make a copy of the list to avoid problems with removing items
# note: perhaps this should already happen in the filter function...
for name in packages[:]:
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
packages.remove(name)
# Also remove all broken symlinks from the scripts folder
# because ... they're broken!
# If it is an actual folder or a file, leave it alone
# because probably someone put it there on purpose.
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if not os.path.exists(path):
symlinks_to_uninstall.append(dict(name=name, link=path))
try:
importlib.import_module(name)
except ImportError:
pass
else:
if name not in packages:
packages.append(name)
# If the scripts folder is supposed to be cleaned
# also remove all existing symlinks that cannot be imported
# and reinstall symlinks that can be imported
if clean:
for name in os.listdir(scripts_path):
path = os.path.join(scripts_path, name)
if os.path.islink(path):
if os.path.exists(path):
try:
importlib.import_module(name)
except ImportError:
path = os.path.join(scripts_path, name)
symlinks_to_uninstall.append(dict(name=name, link=path))
else:
if name not in packages:
packages.append(name)
# add all of the packages in the list of installable packages
# to the list of symlinks to uninstall
# and to the list of symlinks to install
for package in packages:
symlink_path = os.path.join(scripts_path, package)
symlinks_to_uninstall.append(dict(name=package, link=symlink_path))
package_path = compas_rhino._get_package_path(importlib.import_module(package))
symlinks_to_install.append(dict(name=package, source_path=package_path, link=symlink_path))
# Handle legacy install location
# This does not always work,
# and especially not in cases where it is not necessary :)
if ipylib_path:
legacy_path = os.path.join(ipylib_path, package)
if os.path.exists(legacy_path):
symlinks_to_uninstall.append(dict(name=package, link=legacy_path))
# -------------------------
# Uninstall first
# -------------------------
symlinks = [link['link'] for link in symlinks_to_uninstall]
uninstall_results = compas._os.remove_symlinks(symlinks)
# Let the user know if some symlinks could not be removed.
for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
if not success:
results.append((uninstall_data['name'], 'ERROR: Cannot remove symlink, try to run as administrator.'))
# Handle legacy bootstrapper
# Again, only if possible...
if ipylib_path:
if not compas_rhino._try_remove_bootstrapper(ipylib_path):
results.append(('compas_bootstrapper', 'ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.'))
# -------------------------
# Ready to start installing
# -------------------------
# create new symlinks and register the results
symlinks = [(link['source_path'], link['link']) for link in symlinks_to_install]
install_results = compas._os.create_symlinks(symlinks)
# set the exit code based on the installation results
if not all(install_results):
exit_code = -1
# make a list of installed packages
# based on the installation results
# and update the general results list
installed_packages = []
for install_data, success in zip(symlinks_to_install, install_results):
if success:
installed_packages.append(install_data['name'])
result = 'OK'
else:
result = 'ERROR: Cannot create symlink, try to run as administrator.'
results.append((install_data['name'], result))
# finalize the general results list with info about the bootstrapper
if exit_code == -1:
results.append(('compas_bootstrapper', 'WARNING: One or more packages failed, will not install bootstrapper, try uninstalling first'))
else:
try:
_update_bootstrapper(scripts_path, packages)
results.append(('compas_bootstrapper', 'OK'))
except: # noqa: E722
results.append(('compas_bootstrapper', 'ERROR: Could not create compas_bootstrapper to auto-determine Python environment'))
# output the outcome of the installation process
# perhaps we should add more info here
print('Installing COMPAS packages to Rhino {0} scripts folder:'.format(version))
print('{}\n'.format(scripts_path))
for package, status in results:
print(' {} {}'.format(package.ljust(20), status))
if status != 'OK':
exit_code = -1
if exit_code == 0 and len(installed_packages):
print('\nRunning post-installation steps...\n')
if not _run_post_execution_steps(after_rhino_install(installed_packages)):
exit_code = -1
print('\nInstall completed.')
if exit_code != 0:
sys.exit(exit_code)
global INSTALLED_VERSION
INSTALLED_VERSION = version
def _run_post_execution_steps(steps_generator):
all_steps_succeeded = True
post_execution_errors = []
for result in steps_generator:
if isinstance(result, Exception):
post_execution_errors.append(result)
continue
for item in result:
try:
package, message, success = item
status = 'OK' if success else 'ERROR'
if not success:
all_steps_succeeded = False
print(' {} {}: {}'.format(package.ljust(20), status, message))
except ValueError:
post_execution_errors.append(ValueError('Step ran without errors but result is wrongly formatted: {}'.format(str(item))))
if post_execution_errors:
print('\nOne or more errors occurred:\n')
for error in post_execution_errors:
print(' - {}'.format(repr(error)))
all_steps_succeeded = False
return all_steps_succeeded
@compas.plugins.plugin(category='install', pluggable_name='installable_rhino_packages', tryfirst=True)
def default_installable_rhino_packages():
# While this list could obviously be hard-coded, I think
# eating our own dogfood and using plugins to define this, just like
# any other extension/plugin would, is a better way to ensure consistent behavior.
return ['compas', 'compas_rhino']
@compas.plugins.pluggable(category='install', selector='collect_all')
def installable_rhino_packages():
"""Provide a list of packages to make available inside Rhino.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to automatically
have their packages made available inside Rhino when
COMPAS is installed into it.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def installable_rhino_packages():
... return ['compas_fab']
Returns
-------
:obj:`list` of :obj:`str`
List of package names to make available inside Rhino.
"""
pass
@compas.plugins.pluggable(category='install', selector='collect_all')
def after_rhino_install(installed_packages):
"""Allows extensions to execute actions after install to Rhino is done.
Extensions providing Rhino or Grasshopper features
can implement this pluggable interface to perform
additional steps after an installation to Rhino has
been completed.
Parameters
----------
installed_packages : :obj:`list` of :obj:`str`
List of packages that have been installed successfully.
Examples
--------
>>> import compas.plugins
>>> @compas.plugins.plugin(category='install')
... def after_rhino_install(installed_packages):
... # Do something after package is installed to Rhino, eg, copy components, etc
... return [('compas_ghpython', 'GH Components installed', True)]
Returns
-------
:obj:`list` of 3-tuple (str, str, bool)
List containing a 3-tuple with component name, message and True/False success flag.
"""
pass
def _update_bootstrapper(install_path, packages):
# Take either the CONDA environment directory or the current Python executable's directory
python_directory = os.environ.get('CONDA_PREFIX', None) or os.path.dirname(sys.executable)
environment_name = os.environ.get('CONDA_DEFAULT_ENV', '')
conda_exe = os.environ.get('CONDA_EXE', '')
compas_bootstrapper = compas_rhino._get_bootstrapper_path(install_path)
bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
installed_packages = bootstrapper_data.get('INSTALLED_PACKAGES', [])
installed_packages = list(set(installed_packages + list(packages)))
with open(compas_bootstrapper, 'w') as f:
f.write('ENVIRONMENT_NAME = r"{}"\n'.format(environment_name))
f.write('PYTHON_DIRECTORY = r"{}"\n'.format(python_directory))
f.write('CONDA_EXE = r"{}"\n'.format(conda_exe))
f.write('INSTALLED_PACKAGES = {}'.format(repr(installed_packages)))
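# For illustration (values are hypothetical), the generated compas_bootstrapper
# ends up containing something like:
#
#     ENVIRONMENT_NAME = r"research"
#     PYTHON_DIRECTORY = r"/Users/me/anaconda3/envs/research"
#     CONDA_EXE = r"/Users/me/anaconda3/bin/conda"
#     INSTALLED_PACKAGES = ['compas', 'compas_rhino']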
def _filter_installable_packages(version, packages):
ghpython_incompatible = False
if compas.OSX and version == 5.0:
ghpython_incompatible = True
if not packages:
# Flatten list of results (resulting from collect_all pluggable)
packages = sorted(set(itertools.chain.from_iterable(installable_rhino_packages())))
elif 'compas_ghpython' in packages and ghpython_incompatible:
print('Skipping installation of compas_ghpython since it\'s not supported for Rhino 5 for Mac')
if ghpython_incompatible:
packages.remove('compas_ghpython')
return packages
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--version',
choices=compas_rhino.SUPPORTED_VERSIONS,
default=compas_rhino.DEFAULT_VERSION,
help="The version of Rhino to install the packages in."
)
parser.add_argument('-p', '--packages', nargs='+', help="The packages to install.")
parser.add_argument('--clean', dest='clean', default=False, action='store_true')
args = parser.parse_args()
install(version=args.version, packages=args.packages, clean=args.clean)
| 36.502762
| 142
| 0.655895
|
179d6bc144a992b59092574832f397b68e6158b6
| 4,261
|
py
|
Python
|
bulletin/tools/plugins/urls.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 5
|
2015-03-13T19:17:23.000Z
|
2016-08-07T00:12:23.000Z
|
bulletin/tools/plugins/urls.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 54
|
2015-03-13T20:04:03.000Z
|
2021-07-21T05:25:20.000Z
|
bulletin/tools/plugins/urls.py
|
rerb/django-bulletin
|
0b64c9f2eeef4f60c54b54e720b7160aeafa9eb5
|
[
"MIT"
] | 5
|
2015-02-12T20:19:19.000Z
|
2020-02-26T22:11:47.000Z
|
from django.conf.urls import include, patterns, url
import views
from api import urls as api_urls
urlpatterns = patterns(
'',
########################
# Generic plugin views #
########################
# List installed plugins:
url(r'^submit/$',
views.plugin.PluginListView.as_view(),
name='plugin-list'),
# Generic submit view.
url(r'^submit-post/$',
views.plugin.PluginSubmitView.as_view(),
name='submit'),
# Generic update view.
url(r'^update-post/(?P<post_type>\w+)/(?P<pk>\d+)$',
views.plugin.PluginUpdateView.as_view(),
name='update'),
###############################
# End of generic plugin views #
###############################
url(r'^moderation/$',
views.plugin.ModerationView.as_view(),
name='moderation'),
###############
# Event views #
###############
# Submit an event:
url(r'^event/submit/$',
views.event.EventSubmitView.as_view(),
name='event-submit'),
# Update an event:
url(r'^event/(?P<pk>\d+)$',
views.event.EventUpdateView.as_view(),
name='event-update'),
# List of events.
url(r'^event/$',
views.event.EventListView.as_view(),
name='event-list'),
######################
# End of Event views #
######################
#############
# Job views #
#############
# Submit a job:
url(r'^job/submit/$',
views.job.JobSubmitView.as_view(),
name='job-submit'),
# Update a job:
url(r'^job/(?P<pk>\d+)$',
views.job.JobUpdateView.as_view(),
name='job-update'),
# List of jobs.
url(r'^job/$',
views.job.JobListView.as_view(),
name='job-list'),
####################
# End of Job views #
####################
#####################
# NewResource views #
#####################
# Submit a new resource:
# The same magic-name caveat described below for the update URL applies here.
url(r'^new-resource/submit/$',
views.new_resource.NewResourceSubmitView.as_view(),
name='newresource-submit'),
# Update a new resource:
# Note: this url has a magic name. It must be named 'newresource-update'
# because plugins.view.plugin.PluginUpdateView redirects requests for this
# post type to the URL named `{post-type}-update`, where post-type is
# `newresource` (not `new-resource`), so `newresource-update` it must be.
url(r'^new-resource/(?P<pk>\d+)$',
views.new_resource.NewResourceUpdateView.as_view(),
name='newresource-update'),
# List of new resources.
url(r'^new-resource/$',
views.new_resource.NewResourceListView.as_view(),
name='new-resource-list'),
############################
# End of NewResource views #
############################
#####################
# Opportunity views #
#####################
# Submit a opportunity:
url(r'^opportunity/submit/$',
views.opportunity.OpportunitySubmitView.as_view(),
name='opportunity-submit'),
# Update a opportunity:
url(r'^opportunity/(?P<pk>\d+)$',
views.opportunity.OpportunityUpdateView.as_view(),
name='opportunity-update'),
# List of opportunities.
url(r'^opportunity/$',
views.opportunity.OpportunityListView.as_view(),
name='opportunity-list'),
############################
# End of Opportunity views #
############################
###############
# Story views #
###############
# Submit a story:
url(r'^story/submit/$',
views.story.StorySubmitView.as_view(),
name='story-submit'),
# Update a story:
url(r'^story/(?P<pk>\d+)$',
views.story.StoryUpdateView.as_view(),
name='story-update'),
# List of stories
url(r'^story/$',
views.story.StoryListView.as_view(),
name='story-list'),
######################
# End of Story views #
######################
#######
# API #
#######
url(r'^api/', include(api_urls,
namespace='api',
app_name='Newsletter Plugins API')),
)
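# A hypothetical resolution sketch (not part of the original module); the paths
# shown assume this URLconf is included at the site root without a namespace:
#
#     from django.core.urlresolvers import reverse
#     reverse('story-submit')                   # -> '/story/submit/'
#     reverse('newresource-update', args=[42])  # -> '/new-resource/42'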
| 27.668831
| 74
| 0.504811
|
ffb1ad980cdd5997c39c52f6f67ccc22363d6b0f
| 1,916
|
py
|
Python
|
game_func_q.py
|
stephen447/CS7IS2-Artificial-Intelligence-AI---Group-project
|
59a2a00af3a0bbf84ba5932ebd9efe362201ea89
|
[
"MIT"
] | null | null | null |
game_func_q.py
|
stephen447/CS7IS2-Artificial-Intelligence-AI---Group-project
|
59a2a00af3a0bbf84ba5932ebd9efe362201ea89
|
[
"MIT"
] | null | null | null |
game_func_q.py
|
stephen447/CS7IS2-Artificial-Intelligence-AI---Group-project
|
59a2a00af3a0bbf84ba5932ebd9efe362201ea89
|
[
"MIT"
] | null | null | null |
# 2048.py
# importing the logic.py file
# where we have written all the
# logic functions used.
import logic # Modified this to
import numpy as np
def game(current_matrix, x, curr_score):
# we have to move up
if (x == 'W' or x == 'w'):
# call the move_up function
mat, flag, score = logic.move_up(current_matrix)
logic.add_new_2(mat)
# to move down
elif (x == 'S' or x == 's'):
# call the move_down function
mat, flag, score = logic.move_down(current_matrix)
logic.add_new_2(mat)
# to move left
elif (x == 'A' or x == 'a'):
# call the move_left function
mat, flag, score = logic.move_left(current_matrix)
logic.add_new_2(mat)
# to move right
elif (x == 'D' or x == 'd'):
# call the move_right function
mat, flag, score = logic.move_right(current_matrix)
logic.add_new_2(mat)
else:
print("Invalid Key Pressed")
mat, score = current_matrix, 0  # fall back to the unchanged board so the code below still works
# Creating the reward for terminal states - win and loss
curr_state = logic.get_current_state(mat)
if(curr_state == 'WON'): # large positive reward for the win state
state_val = 500000
elif(curr_state == 'LOST'): # large negative reward for the lost state
state_val = -500000
else: # zero for game-not-over states - the reward is just the score for these states
state_val = 0
reward = score+state_val # Reward - is the score improvement from the last state to the next. I also weight the terminal states of win and loss appropriately
new_score = curr_score + score # calculating new score
max_tile = np.max(mat) # return max tile of current state
# print the matrix after each move.
# print(mat[0])
# print(mat[1])
# print(mat[2])
# print(mat[3])
# print('\n')
return mat, x, new_score, max_tile, reward # return new matrix, action, new score, max tile of current node
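# A hypothetical call sketch (the initializer name is assumed; the actual
# board-creation helper lives in logic.py):
#
#     board = logic.start_game()          # assumed 4x4 starting matrix
#     board, action, score, max_tile, reward = game(board, 'w', 0)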
| 29.476923
| 162
| 0.629958
|
29de4516489dfad4851cb8b2f9987baccd0064c6
| 1,079
|
py
|
Python
|
tests/test_edge_odometry.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 97
|
2020-02-24T00:34:56.000Z
|
2022-03-23T11:43:19.000Z
|
tests/test_edge_odometry.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 3
|
2020-02-18T15:46:40.000Z
|
2022-03-17T02:01:51.000Z
|
tests/test_edge_odometry.py
|
Golbstein/python-graphslam
|
cccc022b2f5d797f6511bda9e7dd3a24af403016
|
[
"MIT"
] | 13
|
2020-06-09T08:27:27.000Z
|
2021-11-23T14:05:14.000Z
|
# Copyright (c) 2020 Jeff Irion and contributors
"""Unit tests for the graph.py module.
"""
import unittest
from graphslam.vertex import Vertex
from graphslam.edge.edge_odometry import EdgeOdometry
from graphslam.pose.r2 import PoseR2
from graphslam.pose.r3 import PoseR3
from graphslam.pose.se2 import PoseSE2
from graphslam.pose.se3 import PoseSE3
class TestEdgeOdometry(unittest.TestCase):
"""Tests for the ``EdgeOdometry`` class.
"""
def test_plot(self):
"""Test that the ``plot`` method is not implemented.
"""
v_none = Vertex(0, None)
v_r2 = Vertex(1, PoseR2([1, 2]))
v_se2 = Vertex(2, PoseSE2([1, 2], 3))
v_r3 = Vertex(3, PoseR3([1, 2, 3]))
v_se3 = Vertex(4, PoseSE3([1, 2, 3], [0.5, 0.5, 0.5, 0.5]))
with self.assertRaises(NotImplementedError):
e = EdgeOdometry(0, 1, 0, [v_none, v_none])
e.plot()
for v in [v_r2, v_se2, v_r3, v_se3]:
e = EdgeOdometry(0, 1, 0, [v, v])
e.plot()
if __name__ == '__main__':
unittest.main()
| 24.522727
| 67
| 0.613531
|
20ac501dcbb2e26df8785173889db5d7723a88f5
| 18,977
|
py
|
Python
|
run_pretraining.py
|
xuanyuan14/PROP
|
455db8370253d4132376500f2deb5d23c507a7aa
|
[
"Apache-2.0"
] | 87
|
2020-10-23T10:27:46.000Z
|
2022-03-21T09:26:38.000Z
|
run_pretraining.py
|
shubhampachori12110095/PROP
|
455db8370253d4132376500f2deb5d23c507a7aa
|
[
"Apache-2.0"
] | 11
|
2020-12-22T07:16:33.000Z
|
2021-08-22T02:47:52.000Z
|
run_pretraining.py
|
shubhampachori12110095/PROP
|
455db8370253d4132376500f2deb5d23c507a7aa
|
[
"Apache-2.0"
] | 12
|
2020-10-21T06:42:31.000Z
|
2022-03-15T07:13:59.000Z
|
import os
import json
import random
import logging
import numpy as np
from tqdm import tqdm
from pathlib import Path
from collections import namedtuple
from argparse import ArgumentParser
from tempfile import TemporaryDirectory
import torch
from tensorboardX import SummaryWriter
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, Dataset, SequentialSampler
from pytorch_pretrain_bert.modeling import PROP, BertConfig
from pytorch_pretrain_bert.tokenization import BertTokenizer
from pytorch_pretrain_bert.optimization import BertAdam, warmup_linear
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids label lm_label_ids ")
log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
def convert_example_to_features(example, max_seq_length):
label = example["label"]
input_ids = example["input_ids"]
segment_ids = example["segment_ids"]
masked_label_ids = example["masked_label_ids"]
masked_lm_positions = example["masked_lm_positions"]
# The preprocessed data should be already truncated
assert len(input_ids) == len(segment_ids) <= max_seq_length
input_array = np.zeros(max_seq_length, dtype=np.int)
input_array[:len(input_ids)] = input_ids
mask_array = np.zeros(max_seq_length, dtype=np.int)
mask_array[:len(input_ids)] = 1
segment_array = np.zeros(max_seq_length, dtype=np.int)
segment_array[:len(segment_ids)] = segment_ids
lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
lm_label_array[masked_lm_positions] = masked_label_ids
features = InputFeatures(input_ids=input_array,
input_mask=mask_array,
segment_ids=segment_array,
lm_label_ids=lm_label_array,
label=label
)
return features
class PregeneratedDataset(Dataset):
def __init__(self, training_path, epoch, num_data_epochs, temp_dir='./', mode='train'):
self.epoch = epoch
self.data_epoch = epoch % num_data_epochs
data_file = training_path / f"epoch_{self.data_epoch}.json"
metrics_file = training_path / f"epoch_{self.data_epoch}_metrics.json"
assert data_file.is_file() and metrics_file.is_file()
metrics = json.loads(metrics_file.read_text())
num_samples = metrics['num_training_examples']
if mode == 'train':
# Samples for one epoch should not be larger than 26000000
if num_samples > 26000000:
num_samples = 26000000
else:
num_samples = 1000 # NOT USED
self.temp_dir = None
self.working_dir = None
seq_len = metrics['max_seq_len']
self.temp_dir = TemporaryDirectory(dir=temp_dir)
self.working_dir = Path(self.temp_dir.name)
input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
labels = np.memmap(filename=self.working_dir/'labels.memmap',
shape=(num_samples), mode='w+', dtype=np.bool)
lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
lm_label_ids[:] = -1
logging.info(f"Loading {mode} examples for epoch {epoch}")
with data_file.open() as f:
instance_index = 0
for i, line in enumerate(tqdm(f, total=num_samples, desc=f"{mode} examples")):
if i+1 > num_samples:
break
line = line.strip()
example = json.loads(line)
features = convert_example_to_features(example, seq_len)
input_ids[instance_index] = features.input_ids
segment_ids[instance_index] = features.segment_ids
input_masks[instance_index] = features.input_mask
labels[instance_index] = features.label
lm_label_ids[i] = features.lm_label_ids
instance_index += 1
logging.info('Real num samples:{}'.format(instance_index))
logging.info("Loading complete!")
self.num_samples = num_samples
self.seq_len = seq_len
self.input_ids = input_ids
self.input_masks = input_masks
self.segment_ids = segment_ids
self.labels = labels
self.lm_label_ids = lm_label_ids
def __len__(self):
return self.num_samples
def __getitem__(self, item):
return (torch.tensor(self.input_ids[item].astype(np.int64)),
torch.tensor(self.input_masks[item].astype(np.int64)),
torch.tensor(self.segment_ids[item].astype(np.int64)),
torch.tensor(int(self.labels[item])),
torch.tensor(self.lm_label_ids[item].astype(np.int64)),
)
class RandomPairSampler(torch.utils.data.Sampler):
def __init__(self, data_source, negtive=1):
self.data_source = data_source
self.negtive = negtive
if (len(self.data_source)%(self.negtive+1)) !=0:
raise ValueError('data length {} % {} !=0, can not pair data!'.format(len(self.data_source), self.negtive+1))
@property
def num_samples(self):
return len(self.data_source)
def __iter__(self):
indices = torch.arange(len(self.data_source))
paired_indices = indices.unfold(0, self.negtive+1, self.negtive+1)
paired_indices = torch.stack([paired_indices[i] for i in range(len(paired_indices))])
paired_indices = paired_indices[torch.randperm(len(paired_indices))]
indices = paired_indices.view(-1)
return iter(indices.tolist())
def __len__(self):
return len(self.data_source)
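# A short illustration (hypothetical numbers): with 6 examples and negtive=1 the
# sampler keeps each (positive, negative) pair adjacent and only shuffles the
# pairs, e.g. an epoch may yield [2, 3, 0, 1, 4, 5]. A typical wiring sketch:
#
#     train_dataset = PregeneratedDataset(args.pregenerated_data, epoch=0,
#                                         num_data_epochs=num_data_epochs)
#     train_sampler = RandomPairSampler(train_dataset, negtive=args.negtive_num)
#     train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
#                                   batch_size=args.train_batch_size)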
def main():
parser = ArgumentParser()
parser.add_argument('--pregenerated_data', type=Path, required=True)
parser.add_argument('--output_dir', type=Path, required=True)
parser.add_argument("--temp_dir", type=str, default='./')
parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--do_lower_case", action="store_true")
parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
parser.add_argument("--negtive_num",
type=int,
default=1,
help="Number of negative examples for one positive example.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--save_checkpoints_steps",
default=10000,
type=int,
help="How often to save the model checkpoint.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
args = parser.parse_args()
assert args.pregenerated_data.is_dir(), \
"--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"
samples_per_epoch = []
for i in range(args.epochs):
epoch_file = args.pregenerated_data / f"epoch_{i}.json"
metrics_file = args.pregenerated_data / f"epoch_{i}_metrics.json"
if epoch_file.is_file() and metrics_file.is_file():
metrics = json.loads(metrics_file.read_text())
# Samples for one epoch should not be larger than 26000000
metrics['num_training_examples'] = metrics['num_training_examples'] if metrics['num_training_examples'] < 26000000 else 26000000
samples_per_epoch.append(metrics['num_training_examples'])
else:
if i == 0:
exit("No training data was found!")
print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
print("This script will loop over the available data, but training diversity may be negatively impacted.")
num_data_epochs = i
break
else:
num_data_epochs = args.epochs
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend, which takes care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
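# The command-line batch size is the effective batch: each forward/backward pass uses
# train_batch_size // gradient_accumulation_steps examples, and gradients are
# accumulated over gradient_accumulation_steps micro-batches before optimizer.step().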
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
args.output_dir.mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
total_train_examples = 0
for i in range(args.epochs):
# The modulo takes into account the fact that we may loop over limited epochs of data
total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]
num_train_optimization_steps = int(
total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
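# In distributed training each process consumes only 1/world_size of the batches,
# so the per-process optimizer step count is scaled down accordingly below.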
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = PROP.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
# try:
# from apex.parallel import DistributedDataParallel as DDP
# except ImportError:
# raise ImportError(
# "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
# model = DDP(model)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[
args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
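# Standard BERT recipe: weight decay (0.01) is applied to all parameters except
# biases and LayerNorm parameters, which are excluded from decay.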
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
model.train()
for epoch in range(args.epochs):
epoch_train_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data,
num_data_epochs=num_data_epochs, temp_dir=args.temp_dir)
epoch_eval_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data,
num_data_epochs=num_data_epochs, temp_dir=args.temp_dir, mode='eval')
if args.local_rank == -1:
train_sampler = RandomPairSampler(epoch_train_dataset, args.negtive_num)
eval_sampler = SequentialSampler(epoch_eval_dataset)
else:
# Pair sampling is not supported in distributed mode; fall back to DistributedSampler
train_sampler = DistributedSampler(epoch_train_dataset)
eval_sampler = DistributedSampler(epoch_eval_dataset)
train_dataloader = DataLoader(epoch_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
eval_dataloader = DataLoader(epoch_eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
tr_loss = 0
nb_tr_steps = 0
logging.info("***** Running training *****")
logging.info(f" Num examples = {total_train_examples}")
logging.info(" Batch size = %d", args.train_batch_size)
logging.info(" Num steps = %d", num_train_optimization_steps)
with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar:
for step, batch in enumerate(train_dataloader):
model.train()
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label, lm_label_ids = batch
loss = model(input_ids, segment_ids, input_mask, lm_label_ids, label)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_steps += 1
pbar.update(1)
mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
writer.add_scalar('train/loss', round(mean_loss,4), global_step)
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# Manually apply the linear warmup schedule BERT uses; when args.fp16 is False,
# BertAdam handles this warmup internally.
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if global_step % args.save_checkpoints_steps == 0:
with torch.no_grad():
# Save a ckpt
logging.info("** ** * Saving model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = args.output_dir / "pytorch_model_{}.bin".format(global_step)
torch.save(model_to_save.state_dict(), str(output_model_file))
# Save the last model
logging.info("** ** * Saving model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = args.output_dir / "pytorch_model_last.bin"
torch.save(model_to_save.state_dict(), str(output_model_file))
writer.close()
if __name__ == '__main__':
main()
| 46.741379
| 140
| 0.609317
|
9b122a7f25762268fa7a78c33cfa5d9a49e24f6a
| 630
|
py
|
Python
|
gym/migrations/0007_auto_20210317_1154.py
|
Code-Institute-Submissions/danielboots-fytletic
|
67c3000a4b681d7f76255ab11db841a7f2ba613e
|
[
"OLDAP-2.3"
] | 1
|
2021-03-31T18:54:25.000Z
|
2021-03-31T18:54:25.000Z
|
gym/migrations/0007_auto_20210317_1154.py
|
Code-Institute-Submissions/danielboots-fytletic
|
67c3000a4b681d7f76255ab11db841a7f2ba613e
|
[
"OLDAP-2.3"
] | null | null | null |
gym/migrations/0007_auto_20210317_1154.py
|
Code-Institute-Submissions/danielboots-fytletic
|
67c3000a4b681d7f76255ab11db841a7f2ba613e
|
[
"OLDAP-2.3"
] | 1
|
2021-03-31T11:00:11.000Z
|
2021-03-31T11:00:11.000Z
|
# Generated by Django 3.1.6 on 2021-03-17 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gym', '0006_gym_about'),
]
operations = [
migrations.AddField(
model_name='gym',
name='gym_owner',
field=models.ImageField(blank=True, null=True, upload_to='media/%Y/%m/%d'),
),
migrations.AddField(
model_name='gym',
name='gym_owner_name',
field=models.CharField(default='Fytletic Gym Owner', max_length=50),
preserve_default=False,
),
]
| 25.2
| 87
| 0.577778
|
f12ac401c64493f1fc0cd424d536c7773d40fec1
| 14,497
|
py
|
Python
|
hydrus/test/TestServerDB.py
|
Trivernis/hydrus
|
212088ba4b504c3dbce45c602741b0cce2e799b4
|
[
"WTFPL"
] | null | null | null |
hydrus/test/TestServerDB.py
|
Trivernis/hydrus
|
212088ba4b504c3dbce45c602741b0cce2e799b4
|
[
"WTFPL"
] | 23
|
2020-09-28T21:07:00.000Z
|
2021-07-13T16:04:42.000Z
|
hydrus/test/TestServerDB.py
|
Trivernis/hydrus
|
212088ba4b504c3dbce45c602741b0cce2e799b4
|
[
"WTFPL"
] | 2
|
2021-01-04T14:12:38.000Z
|
2021-04-18T03:37:02.000Z
|
import time
import unittest
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core.networking import HydrusNetwork
from hydrus.core.networking import HydrusNetworking
from hydrus.server import ServerDB
from hydrus.test import TestController
class TestServerDB( unittest.TestCase ):
def _read( self, action, *args, **kwargs ): return TestServerDB._db.Read( action, *args, **kwargs )
def _write( self, action, *args, **kwargs ): return TestServerDB._db.Write( action, True, *args, **kwargs )
@classmethod
def setUpClass( cls ):
cls._db = ServerDB.DB( HG.test_controller, TestController.DB_DIR, 'server' )
@classmethod
def tearDownClass( cls ):
cls._db.Shutdown()
while not cls._db.LoopIsFinished():
time.sleep( 0.1 )
del cls._db
def _test_account_creation( self ):
result = sorted( self._read( 'account_types', self._tag_service_key, self._tag_service_account ), key = lambda at: at.GetTitle() )
( self._tag_service_admin_account_type, self._null_account_type ) = result
self.assertEqual( self._tag_service_admin_account_type.GetTitle(), 'administrator' )
self.assertEqual( self._null_account_type.GetTitle(), 'null account' )
#
self._regular_user_account_type = HydrusNetwork.AccountType.GenerateNewAccountType( 'regular user', { HC.CONTENT_TYPE_MAPPINGS : HC.PERMISSION_ACTION_CREATE }, HydrusNetworking.BandwidthRules() )
self._deletee_user_account_type = HydrusNetwork.AccountType.GenerateNewAccountType( 'deletee user', {}, HydrusNetworking.BandwidthRules() )
new_account_types = [ self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type, self._deletee_user_account_type ]
#
self._write( 'account_types', self._tag_service_key, self._tag_service_account, new_account_types, {} )
edited_account_types = self._read( 'account_types', self._tag_service_key, self._tag_service_account )
self.assertEqual(
{ at.GetAccountTypeKey() for at in edited_account_types },
{ at.GetAccountTypeKey() for at in ( self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type, self._deletee_user_account_type ) }
)
#
r_keys = self._read( 'registration_keys', self._tag_service_key, self._tag_service_account, 5, self._deletee_user_account_type.GetAccountTypeKey(), 86400 * 365 )
access_keys = [ self._read( 'access_key', self._tag_service_key, r_key ) for r_key in r_keys ]
account_keys = [ self._read( 'account_key_from_access_key', self._tag_service_key, access_key ) for access_key in access_keys ]
accounts = [ self._read( 'account', self._tag_service_key, account_key ) for account_key in account_keys ]
for account in accounts:
self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._deletee_user_account_type.GetAccountTypeKey() )
#
deletee_account_type_keys_to_replacement_account_type_keys = { self._deletee_user_account_type.GetAccountTypeKey() : self._regular_user_account_type.GetAccountTypeKey() }
new_account_types = [ self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type ]
self._write( 'account_types', self._tag_service_key, self._tag_service_account, new_account_types, deletee_account_type_keys_to_replacement_account_type_keys )
accounts = [ self._read( 'account', self._tag_service_key, account_key ) for account_key in account_keys ]
self._tag_service_regular_account = accounts[0]
for account in accounts:
self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
#
r_keys = self._read( 'registration_keys', self._tag_service_key, self._tag_service_account, 5, self._regular_user_account_type.GetAccountTypeKey(), 86400 * 365 )
self.assertEqual( len( r_keys ), 5 )
for r_key in r_keys: self.assertEqual( len( r_key ), 32 )
r_key = r_keys[0]
access_key = self._read( 'access_key', self._tag_service_key, r_key )
access_key_2 = self._read( 'access_key', self._tag_service_key, r_key )
self.assertNotEqual( access_key, access_key_2 )
with self.assertRaises( HydrusExceptions.InsufficientCredentialsException ):
# this access key has been replaced
self._read( 'account_key_from_access_key', self._tag_service_key, access_key )
account_key = self._read( 'account_key_from_access_key', self._tag_service_key, access_key_2 )
with self.assertRaises( HydrusExceptions.InsufficientCredentialsException ):
# this registration token has been deleted
self._read( 'access_key', self._tag_service_key, r_key )
def _test_account_modification( self ):
regular_account_key = self._tag_service_regular_account.GetAccountKey()
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
self._write( 'modify_account_account_type', self._tag_service_key, self._tag_service_account, regular_account_key, self._tag_service_admin_account_type.GetAccountTypeKey() )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._tag_service_admin_account_type.GetAccountTypeKey() )
self._write( 'modify_account_account_type', self._tag_service_key, self._tag_service_account, regular_account_key, self._regular_user_account_type.GetAccountTypeKey() )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
#
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertFalse( account.IsBanned() )
ban_reason = 'oh no no no'
self._write( 'modify_account_ban', self._tag_service_key, self._tag_service_account, regular_account_key, ban_reason, None )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertTrue( account.IsBanned() )
( reason, created, expires ) = account.GetBannedInfo()
self.assertEqual( reason, ban_reason )
self.assertTrue( HydrusData.GetNow() - 5 < created < HydrusData.GetNow() + 5 )
self.assertEqual( expires, None )
ban_reason = 'just having a giggle m8'
ban_expires = HydrusData.GetNow() + 86400
self._write( 'modify_account_ban', self._tag_service_key, self._tag_service_account, regular_account_key, ban_reason, ban_expires )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertTrue( account.IsBanned() )
( reason, created, expires ) = account.GetBannedInfo()
self.assertEqual( reason, ban_reason )
self.assertTrue( HydrusData.GetNow() - 5 < created < HydrusData.GetNow() + 5 )
self.assertEqual( expires, ban_expires )
self._write( 'modify_account_unban', self._tag_service_key, self._tag_service_account, regular_account_key )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertFalse( account.IsBanned() )
#
set_expires = HydrusData.GetNow() - 5
self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertTrue( account.IsExpired() )
self.assertEqual( set_expires, account.GetExpires() )
set_expires = HydrusData.GetNow() + 86400
self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertFalse( account.IsExpired() )
self.assertEqual( set_expires, account.GetExpires() )
set_expires = None
self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
account = self._read( 'account', self._tag_service_key, regular_account_key )
self.assertFalse( account.IsExpired() )
self.assertEqual( set_expires, account.GetExpires() )
#
set_message = 'hello'
self._write( 'modify_account_set_message', self._tag_service_key, self._tag_service_account, regular_account_key, set_message )
account = self._read( 'account', self._tag_service_key, regular_account_key )
( message, created ) = account.GetMessageAndTimestamp()
self.assertEqual( message, set_message )
set_message = ''
self._write( 'modify_account_set_message', self._tag_service_key, self._tag_service_account, regular_account_key, set_message )
account = self._read( 'account', self._tag_service_key, regular_account_key )
( message, created ) = account.GetMessageAndTimestamp()
self.assertEqual( message, set_message )
def _test_content_creation( self ):
tag = 'character:samus aran'
hash = HydrusData.GenerateKey()
mappings_content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, ( hash, ) ) )
mapping_content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPING, ( tag, hash ) )
client_to_server_update = HydrusNetwork.ClientToServerUpdate()
client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, mappings_content )
self._write( 'update', self._tag_service_key, self._tag_service_regular_account, client_to_server_update, HydrusData.GetNow() )
# can extend this to generate and fetch an actual update given a timespan
#
result = self._read( 'account_from_content', self._tag_service_key, mapping_content )
self.assertEqual( result.GetAccountKey(), self._tag_service_regular_account.GetAccountKey() )
def _test_init_server_admin( self ):
result = self._read( 'access_key', HC.SERVER_ADMIN_KEY, b'init' )
self.assertEqual( type( result ), bytes )
self.assertEqual( len( result ), 32 )
self._admin_access_key = result
#
result = self._read( 'account_key_from_access_key', HC.SERVER_ADMIN_KEY, self._admin_access_key )
self.assertEqual( type( result ), bytes )
self.assertEqual( len( result ), 32 )
self._admin_account_key = result
#
result = self._read( 'account', HC.SERVER_ADMIN_KEY, self._admin_account_key )
self.assertEqual( type( result ), HydrusNetwork.Account )
self.assertEqual( result.GetAccountKey(), self._admin_account_key )
self._admin_account = result
def _test_service_creation( self ):
self._tag_service_key = HydrusData.GenerateKey()
self._file_service_key = HydrusData.GenerateKey()
current_services = self._read( 'services' )
self._tag_service = HydrusNetwork.GenerateService( self._tag_service_key, HC.TAG_REPOSITORY, 'tag repo', 100 )
self._file_service = HydrusNetwork.GenerateService( self._file_service_key, HC.FILE_REPOSITORY, 'file repo', 101 )
new_services = list( current_services )
new_services.append( self._tag_service )
new_services.append( self._file_service )
service_keys_to_access_keys = self._write( 'services', self._admin_account, new_services )
self.assertEqual( set( service_keys_to_access_keys.keys() ), { self._tag_service_key, self._file_service_key } )
self._tag_service_access_key = service_keys_to_access_keys[ self._tag_service_key ]
self._file_service_access_key = service_keys_to_access_keys[ self._file_service_key ]
self._tag_service_account_key = self._read( 'account_key_from_access_key', self._tag_service_key, self._tag_service_access_key )
self._file_service_account_key = self._read( 'account_key_from_access_key', self._file_service_key, self._file_service_access_key )
self._tag_service_account = self._read( 'account', self._tag_service_key, self._tag_service_account_key )
self._file_service_account = self._read( 'account', self._file_service_key, self._file_service_account_key )
self.assertEqual( self._tag_service_account.GetAccountKey(), self._tag_service_account_key )
self.assertEqual( self._file_service_account.GetAccountKey(), self._file_service_account_key )
def test_server( self ):
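# The sub-tests share state on self and must run in this order: the admin account
# from _test_init_server_admin is needed to create services, and the service keys
# from _test_service_creation are needed by the account and content tests.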
self._test_init_server_admin()
self._test_service_creation()
self._test_account_creation()
self._test_content_creation()
self._test_account_modification()
| 42.764012
| 203
| 0.660964
|
23d1035ff68ec45edbbcfa9ffdf973f6455aeaf0
| 2,080
|
py
|
Python
|
Tools/scons/scons-local/SCons/Tool/MSCommon/arch.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Tools/scons/scons-local/SCons/Tool/MSCommon/arch.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Tools/scons/scons-local/SCons/Tool/MSCommon/arch.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/arch.py 2018/09/30 19:25:33 Sye"
__doc__ = """Module to define supported Windows chip architectures.
"""
import os
class ArchDefinition(object):
"""
A class for defining architecture-specific settings and logic.
"""
def __init__(self, arch, synonyms=[]):
self.arch = arch
self.synonyms = synonyms
SupportedArchitectureList = [
ArchDefinition(
'x86',
['i386', 'i486', 'i586', 'i686'],
),
ArchDefinition(
'x86_64',
['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
),
ArchDefinition(
'ia64',
['IA64'],
),
ArchDefinition(
'arm',
['ARM'],
),
]
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
SupportedArchitectureMap[a.arch] = a
for s in a.synonyms:
SupportedArchitectureMap[s] = a
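# Lookup example: SupportedArchitectureMap['amd64'] and SupportedArchitectureMap['x86_64']
# both resolve to the same ArchDefinition, since each synonym maps to its canonical entry.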
| 30.588235
| 81
| 0.670192
|
5178824357fc7db7a021861f18542425332bae28
| 369
|
py
|
Python
|
geotrek/feedback/migrations/0029_auto_20220324_0944.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/feedback/migrations/0029_auto_20220324_0944.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/feedback/migrations/0029_auto_20220324_0944.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.1.14 on 2022-03-24 09:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0028_auto_20220316_0951'),
]
operations = [
migrations.RenameField(
model_name='report',
old_name='uid',
new_name='external_uuid',
),
]
| 19.421053
| 48
| 0.590786
|
088183274a9a4d2c1bce034cd75160d37640640a
| 67,705
|
py
|
Python
|
elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/email/module.py
|
fastlorenzo/test-ci
|
cc4f221ec013e8b3d1b3d651c4698ecd3f810445
|
[
"BSD-3-Clause"
] | null | null | null |
elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/email/module.py
|
fastlorenzo/test-ci
|
cc4f221ec013e8b3d1b3d651c4698ecd3f810445
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T18:37:47.000Z
|
2020-11-04T18:37:53.000Z
|
elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/email/module.py
|
fastlorenzo/test-ci
|
cc4f221ec013e8b3d1b3d651c4698ecd3f810445
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
#
# Part of RedELK
# Script to check if there are alarms to be sent
#
# Author: Outflank B.V. / Mark Bergman / @xychix
# Contributor: Lorenzo Bernardi / @fastlorenzo
#
from config import notifications
import socket
import json
import argparse
import csv
import hashlib
import requests
import smtplib
import os
import shutil
from json2html import *
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.utils import COMMASPACE, formatdate
from email.header import Header
from email.utils import formataddr
from email.mime.text import MIMEText
from modules.helpers import *
from subprocess import Popen, PIPE
from time import sleep
info = {
'version': 0.1,
'name': 'email connector',
'description': 'This connector sends RedELK alerts via email',
'type': 'redelk_connector',
'submodule': 'email'
}
class Module():
def __init__(self):
self.logger = logging.getLogger(info['submodule'])
def SendMail(self, to, mail, subject,
fromaddr=notifications['email']['from'],
attachment=None,
smtpSrv=notifications['email']['smtp']['host'],
smtpPort=notifications['email']['smtp']['port'],
smtpName=notifications['email']['smtp']['login'],
smtpPass=notifications['email']['smtp']['pass']
):
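# Note: the keyword defaults above are read from the notifications config when this
# module is imported; to use different SMTP settings per call, pass them explicitly.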
msg = MIMEMultipart()
# The mail body is already rendered HTML
html = mail
msg['Subject'] = subject
msg['From'] = formataddr((str(Header(fromaddr, 'utf-8')), fromaddr))
msg['To'] = ", ".join(to)
msg['Date'] = formatdate()
# DONE PREPARATION, BUILD MAIL
msg.attach(MIMEText(html, 'html'))
if attachment is not None:
msg = self.Attach(msg, attachment)
# Send the message over SMTP with STARTTLS
s = smtplib.SMTP(smtpSrv, int(smtpPort))
s.starttls()
s.login(smtpName, smtpPass)
resp = s.sendmail(fromaddr, to, msg.as_string())
self.logger.debug('smtpd response: %s' % (resp))
s.close()
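# Hypothetical call (addresses and paths are placeholders, not from the original config):
#   Module().SendMail(['soc@example.org'], '<p>New alarm</p>', '[RedELK] alarm',
#                     attachment='/tmp/alarm_details.csv')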
def Attach(self, msg, filename):
with open(filename, 'rb') as fil:
part = MIMEApplication(
fil.read(),
Name=filename
)
part['Content-Disposition'] = 'attachment; filename="%s"' % filename
msg.attach(part)
return msg
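# The filename is used verbatim as the attachment name, so a full path appears in the
# Content-Disposition header; a caller wanting a bare name could pass os.path.basename(...).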
def send_alarm(self, alarm):
fontsize = 13
img = 'iVBORw0KGgoAAAANSUhEUgAAAwoAAANwCAYAAACPgZ2GAAAG03pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHja7ZhrciwpDoX/s4peAggJwXJ4RswOevnzUVX2td2P6VfEREe40llQJCmJcyQhHPbP/znhJz5SXIOa19JKiXy0aZNOp8bnpz2+U9TH9/PTY36NfhoPHl8PhKHbe/6K3p9t6ozbjxfedKTxeTzU1xOpL0FvmuXZ5Kv59tdHIxmX53jSl6C2n53Sqn80dbwEzdfEhymvW9/Nejb3d/g04KC0DEVZZOeUI9+SXxbk593vON8xO/NSVvqSJdBYflsrgHxa3lsb40eAPoG8X5aHr+i/976AL/01nr9gWd4ElV9/kOzLeH5XIx8V51cvMPz5wXgn5hcgn7PqOfu5uq4FRMvLox5gpzcxTBxAnh+vFS7nNvr+uBpXxSEnlK844+CaqSUB/ROSppV6Omk/2pkmJqpsgRMRmZIfYxWOmsz85IkrHfHc8soVzqbskDPD8m5LeuhtD30zVTSvxFRJCEuX5t+6wu89/DNXOGdeiFKsT5zwC+yS6x2YcZm738yCkHRevNkD4LfrRX/84D9XiDLtwlxZYI/jKWJY+uFb+cFzZp7RPkMoBV8vAUCEbsOYlGEglpQtlRRdxFMCxwpBHcslqwwYSGayMFI05yLBpcrVzTueHnPFpMgdJjdBhOWSHW5a7pClaviPa8WHOrGlZlbMrQZr1ksuWqyU4uUmue7Z1c2Lu1dv3muuWq2W6rXWVnuTlsmB1krzVltrvUvoKOrI6szvjAwZeeiwUYaPOtroE/eZOm2W6bPONvuSlRdpYpXlq662+k5hkym2bttl+6677X7wtZOPHjvl+Kmnnf7OWnqF7dfrT7CWXqzJg6k7z99ZYzS4v4lIN53Y5QzGRBOM+2UAh5bLWaxJVS5zl7PYhKAwwUi73ISVLmNQqDuJnfTO3Q/m/hBvweof4k3+F3PhUvdPMBeg7pe8/Qpr6+5z88HYMwovpjETfczpUgM3u6fUv9t+C/oW9C3o/y0od7tVEjmkl3G6lTrKXHn35COp1Eq+XTwWJXnsGmY7S8Ys9++keCwtI0O3vbKu0ZqsobsuFJxG2iNN1Xy8tk27m1FaiNUmGriPaOsn975JVUbi6dtXYwOo2t0kYhK1/Rxkq+EH6/JpOe/a+FXr2qmM0cKZlVR4WiejprZaXE33pjKzGfV4X6RIw5DWE4lV50zkW3aQ4S3poeQgDeeTesDgmaSz2SsjLXKkGPlMOWOx0k32Tdg/m5e8J3XaGEXa3klnn6Ajpi65phkou/apqfY+M6U18LkMNoI2TwSnSaFFxmWNbtsHBeSwzBLQkjZEOHMoEPcJUYvzPoU6u98x6dXTqasoxV3KPtNxij1P6n1GWZTsZnON2SnTve5uHu/KTpACYKm3MnI+l5uy2MhaW2x7VM7sq543ldAsrD7XPdrcevI8w1zMmMe+gtDAftgXz2c8sWPQUslWaNybjI0C3EJwn10L0I5GURR1pdkjjNbJtnsgYQQ70pF71trtZPxj7l0MTtQPW/Q2qGm21zpxnYm5B6+JDUQt5dEPT9wh/RYRojrBX+OubMaxL5Tna5exD1prd8sbM7uNmLWtUnW0LsOXzIV7Af6iPgKWMeJp428GXPgwMM8qfRYDH2XnxmUfIbJBTYBoztwScEIv0VWV6DOctI8+eB6gmAqlTadqmA3HNLnOmW7l4sPEvEPzPEjdfZTVB+zv3XLpZffZcR6J9VZsBjwQlAtOlDkSpEaoFWqG2DhHQKKZ1qM+Gm5CFFBWEJ0V128L+27kUilQQ2JClm2w2Al94IPWgqOvvQjsXVZp+4bYuOHV996Sb7euzrIUEysp50Z/P06MUCA1H7wTsd8GpYsSBwuj6ikIMxVCfSylKsIDKkuquBmx0DnuU7QFVIDodstCJHGyIYplVhAuONL2SgCPoZ0yJ7ZqrmcxY/skbGJxjj+lnz5WSIaxTji697224W/oOYlXcO82JhrWJowLlRcYXdu1TlZ2q1UXsCPdbA2guByf8wVm91wAHPCWSZiiJoKHo/ueu87l8nq55o31VRfRrkNsneoD+ikvKdVAZZGxzuE40BBISDzTQK4UfphGOXprQxZz7pGlcuTbt+YlCw7VE7oTyL6VBHmLzxMnMLRjTC/1GqgtyrQIkApVrab7X4Ezx1VGZuu8OryXoJhMJiEfTZRxiBmguTsn2kWxHqOvi29h/XA0D6fJq8w6adhI4PkGLRmS6Gddc2gibDPJZo+jNytjjq6luvq6wJAcCd7biZ3Eo+tZGgsdzMcGD1kJGPJ08U0ee/yIf6UNf/XFb0Hfgv5FgpwDXuPo/V/wcgma+qcdvQAAAYRpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSWhzsIOKQoTpZEBVxlFYsgoXSVmjVweTSL2jSkKS4OAquBQc/FqsOLs66OrgKguAHiJOjk6KLlPi/pNAixoPjfry797h7BwitGlPNvklA1Swjk4yL+cKqGHhFEAGEEYIgMVNPZRdz8Bxf9/Dx9S7Gs7zP/TnCStFkgE8knme6YRFvEM9uWjrnfeIIq0gK8TnxhEEXJH7kuuzyG+eywwLPjBi5TII4QiyWe1juYVYxVOIZ4qiiapQv5F1WOG9xVmsN1rknf2GoqK1kuU5zFEksIYU0RMhooIoaLMRo1UgxkaH9uId/xPGnySWTqwpGjgXUoUJy/OB/8LtbszQ95SaF4kD/i21/jAGBXaDdtO3vY9tunwD+Z+BK6/rrLWDuk/RmV4seAYPbwMV1V5P3gMsdYPhJlwzJkfw0hVIJeD+jbyoAQ7dAcM3trbOP0wcgR10t3wAHh8B4mbLXPd490Nvbv2c6/f0A+GNydpVlQhsAAA+caVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA0LjQuMC1FeGl2MiI+CiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICB4bWxuczppcHRjRXh0PSJodHRwOi8vaXB0Yy5vcmcvc3RkL0lwdGM0eG1wRXh0LzIwMDgtMDItMjkvIgogICAgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iCiAgICB4bWxuczpzdEV2dD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlRXZlbnQjIgogICAgeG1sbnM6cGx1cz0iaHR0cDovL25zLnVzZXBsdXMub3Jn
L2xkZi94bXAvMS4wLyIKICAgIHhtbG5zOkdJTVA9Imh0dHA6Ly93d3cuZ2ltcC5vcmcveG1wLyIKICAgIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIKICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICB4bXBNTTpEb2N1bWVudElEPSJnaW1wOmRvY2lkOmdpbXA6OTQ4YjdmOTMtNTM3OC00MzMzLWI4OTAtYjI3ODBjODQ2YTlhIgogICB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOmY5NTlhNWJmLTA2NmMtNDYwNC05YmMzLTNlYTZhYzcyMTY0NiIKICAgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjYyY2NmNGEyLTAwMWMtNDEyMS04ZmY2LTQxOWVjODBkZjRkMyIKICAgR0lNUDpBUEk9IjIuMCIKICAgR0lNUDpQbGF0Zm9ybT0iV2luZG93cyIKICAgR0lNUDpUaW1lU3RhbXA9IjE2MDIwNzY5ODEwMTc5MjMiCiAgIEdJTVA6VmVyc2lvbj0iMi4xMC4yMCIKICAgZGM6Rm9ybWF0PSJpbWFnZS9wbmciCiAgIHRpZmY6T3JpZW50YXRpb249IjEiCiAgIHhtcDpDcmVhdG9yVG9vbD0iR0lNUCAyLjEwIj4KICAgPGlwdGNFeHQ6TG9jYXRpb25DcmVhdGVkPgogICAgPHJkZjpCYWcvPgogICA8L2lwdGNFeHQ6TG9jYXRpb25DcmVhdGVkPgogICA8aXB0Y0V4dDpMb2NhdGlvblNob3duPgogICAgPHJkZjpCYWcvPgogICA8L2lwdGNFeHQ6TG9jYXRpb25TaG93bj4KICAgPGlwdGNFeHQ6QXJ0d29ya09yT2JqZWN0PgogICAgPHJkZjpCYWcvPgogICA8L2lwdGNFeHQ6QXJ0d29ya09yT2JqZWN0PgogICA8aXB0Y0V4dDpSZWdpc3RyeUlkPgogICAgPHJkZjpCYWcvPgogICA8L2lwdGNFeHQ6UmVnaXN0cnlJZD4KICAgPHhtcE1NOkhpc3Rvcnk+CiAgICA8cmRmOlNlcT4KICAgICA8cmRmOmxpCiAgICAgIHN0RXZ0OmFjdGlvbj0ic2F2ZWQiCiAgICAgIHN0RXZ0OmNoYW5nZWQ9Ii8iCiAgICAgIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6YmQ2NGY4ZmEtZTU2NS00MDVmLWE1YzYtNDBmYTFhM2MzYzkyIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJHaW1wIDIuMTAgKFdpbmRvd3MpIgogICAgICBzdEV2dDp3aGVuPSIyMDIwLTEwLTA3VDE1OjIzOjAxIi8+CiAgICA8L3JkZjpTZXE+CiAgIDwveG1wTU06SGlzdG9yeT4KICAgPHBsdXM6SW1hZ2VTdXBwbGllcj4KICAgIDxyZGY6U2VxLz4KICAgPC9wbHVzOkltYWdlU3VwcGxpZXI+CiAgIDxwbHVzOkltYWdlQ3JlYXRvcj4KICAgIDxyZGY6U2VxLz4KICAgPC9wbHVzOkltYWdlQ3JlYXRvcj4KICAgPHBsdXM6Q29weXJpZ2h0T3duZXI+CiAgICA8cmRmOlNlcS8+CiAgIDwvcGx1czpDb3B5cmlnaHRPd25lcj4KICAgPHBsdXM6TGljZW5zb3I+CiAgICA8cmRmOlNlcS8+CiAgIDwvcGx1czpMaWNlbnNvcj4KICA8L3JkZjpEZXNjcmlwdGlvbj4KIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAKPD94cGFja2V0IGVuZD0idyI/PlmoiZMAAAAGYktHRAAAAAAAAPlDu38AAAAJcEhZcwAACxMAAAsTAQCanBgAAAAHdElNRQfkCgcNFwCbPcTpAAAgAElEQVR42uzdd7jcxNXH8e+57gY3TAeDjSmmm2J67y1A6CWA6RB6CSS8gQAhAVJoIYROILTQCb3aGDAd04upoRgwBty773n/kCDG3L13VytpVX6f5+F5Et/VSHtmNNJZjWZAREREREREREREREREREREREREREREREREREREREQicfcd3P1Ld7/K3RdQREREREREBHd/0//nU3dfUlERERERESl3kjCf/9RH7r6QoiMiIiIiUt5EYW1v2TB376AIQZNCICIiIiIlNF+Ff18fOEnhEREREREpIXff0yub7O59yh4jPVEQERERkTLq1MrfugC/UaIgIiIiIlI+s9r4+2B3761EQURERESkXGa28fcuwGAlCiIiIiIi5TK9is/sp0RBRERERKRcxlfxmRXdfSklCiIiIiIiShTmtJ0SBRERERGR8phQ5ee2UKIgIiIiIlIe1T5RWMfdS3nPrERBRERERMqo2icK3YFllSiIiIiIiJTDRMCr/OwgJQoiIiIiIiVgZrOAyVV+fGUlCiIiIiIi5VHtewoDyhic9mofIvkQvki1CbB+2LFdY2bfKjIiIiKRTQAWquJzyyhUIpLVJGF1d3/Jf+xdd++h6IiIiES+vr7o1Znl7p3LFh8NPRLJfid2IjAcWHWOPy0N/FIREhERiazaoUdNQB8lCiKSlQShyd3/DvwZ6FDhY6srUiIiIpFNqOGzi5YtOHpHQSS7LgEObeMz0xQmERGRyMbX8Fk9URCRxnP3U6tIEgCeVLREREQi0xMFJQoiuUoS9gLOqOKjs4C7FDEREZHIJtbw2XmVKIhII5OEDYF/AlbFx28wsy8UNRERkcgm1/DZXkoURKRRSUJv4Hoqv7g8u+nAHxU1ERGRukxRoqBEQSTrSYKFSUK14x9PNrN3FTkRERElCkoURIrtcGCrKj/7CHCRQiYiIpJqotBTiYKIpMrd+xOslVCNMcB+ZtasyImIiKSaKOiJgoikmiQYcAXQtcpNjtMLzCIiIrGp5WXm3DxRcPf13H01Va9IvhOFw7x6DypiIiIisV6Ht67hOtzs7u1z8J12me2Yh7r7Iqppkfx1Tgu4+9gqO6dJ7t5PURMREYn1WryR12beHHynB+c45s/cfakoZWnokUjjnA30qPKz55rZRwqZiIhIrKbX+PnuOfhOy8/x/xcBhkRNFkQk/Wx/DXefVeWvFx+7exdFTUREJPbr8Xo1PlFYNgffaUKFY/+01mFIeqIgkv4J3A64tIbz73gzm6LIiYiIxK7We+HOOf6uiwL3unvXpIIjIvXbF1ilys8+A9ypkImIiGQiUeiUg+80o5W/DQQuVqIgkkHu3hv4Sw2bnGRmrsiJiIgkol2Nn8/DE4W2plHf390PVKIgkj2nAfNU+dlHzOwphUxERCQxRXyiMKqKz1xYzcvNShREUuLuA4DDa9jkDEVNREREiUKNqlmYdS7gynDhVyUKIhlwPtChys8ONbOnFTIREZFMJQp5GHo0psrPbQCsoERBpMHcfQdgqxo2OVdRExERUaIQwbc1fHZLJQoijU0SOtR44/8W8JAiJyIikrhaX2bOw9CjWhKFTZQoiDTWEcAyNXz+PM10JCIikopuBUwUJtbw2Q3cvaMSBZEGcPd5CGY6qtaXwPWKnIiISCp61Pj5Djn4TpNr+OxcwKpKFEQa4zSgVw2fv8DMpilsIiIiShRSSBRQoiDSAO6+BLVNhzoO+IciJyIiktlEoX0BE4VVlCiIpO90oGMNn7/azMYrbCIiIkoUUkwUBipREEmRuy8P7FXLJsAlipyIiEiqutf4+XY5+E61JgrLu3uTEgWR9Pypxs7kYTN7X2ETERFJVc8aP1/EJwpdgEWUKIikwN3XAbapcbOrFTkREZHU1fpEIQ+JwtQI2yyuREEk+STBgPNr3GwUcIeiJyIikrq5C5goNEfYpp8SBZHk7QisUeM215rZTIVOREQkdV0LmChEWbS1rxIFkSTPSvd2wJkRTuZ/KnoiIiK5SBTysI5ClERhISUKIsnaH1ihxm0eM7ORCp2IiEhDzFXj54s69GgeJQoiSaXu7p2A30bYVC8xi4iINE4Rhx5F0UuJgkhyjqDCjAGtGA3crtCJiIikz93bA50KmChEOUY9URBJqKPpApwYYdObzGy6IigiItIQXSNsk4dEoWOEbXoqURBJxtFUeAmoDdcpdCIiIrlKFPLwMnOUY+yiREEkZu7eHfhVhE3fNLOXFUEREZGGmSvCNkV9otBeiYJI/H4J9I6w3a0KnYiISG3cfS13v9DdV4uhuKIOPYryRKGdWpdIvJ
1Vd3cf49EspQiKiIjUdN3t6O6jwuvoZHdfs87y1oxw/X4wB3FaPcL3+ralsvREQSS6Y4j2NOF5M3tP4RMREanJdvzvncAuwC3hEOCoivpEoVuEbdopURCJL1vvCRwfcfMbFEEREZGarTXH/18M+Esd5RX1HYUoydMMJQoi8TmKClOJtWEm8G+FT0REpGYDW/i3g9x9YMTyijrrUZREYaoSBZEYuPvcBMOOonjMzL5SFH+IZR93v87dv3H3XRURERFpxSIt/JsBp6eYKBT1icIUJQoi8TiUaO8mANyo8P2QJKwKPAfsQ7Ai5BGKioiItGLBCv++vbsPiFCehh79j54oiMRwc9uZ6O8mTAPuVhSDmSaAofx4obqFFBkREalw3WgH9KrwZwNWiVCshh79j54oiMRgMLBwxG0fM7Nx6ux9IPAAP52VYbqal4iIVNApTAgqWTqlRCEPTxTmibDNd0oUROq7wW0PnFRHEXcohr448BAt/yo0Sq1MREQqaGu14WUiJh9xH0cWLBhhm2+UKIjUZy+gX8RtZ1LyYUfu3hW4E5i/wke0toSIiES9qY/yjkKUpwOdlSiIyJw3uU3Ar+so4kkzG1PyMF5B62NI31VLExGRCtq18ffl3b3WJwRREoVOOYiVEgWRlO0ELFvH9reXPNE6juCJTGteUDMTEZEK2nqPrSOwUgqJQqafKLi7AQtE2HSMEgWR6E6uY9tm4K4SJwmDgHPa+Nhk4GU1MxERqWBGFZ8ZVPZEgWAx2ChPPT5XoiAS7UZ3A2D1Oop4zsw+L2nsegA30/bLX8+ZmWY9EhGRSqq5RqyZQqLQPpzcJKsWjLjdp0oURKI5oc7tyzzb0eXAElV87nE1MxERqTNR2DiFRAGy/VRh8YjbfaZEQaRG7r4MsJ0ShUix2w/YrcqPP6DWJiIilZjZLGB8Gx/rE163y5wo9I+YhI1WoiBSu+PqPE/eMLMPS5gkdAHOqvLjXwEj1NRERKQNH1fxmU1LnigsGWGbz82sWYmCSG03u/MB+9ZZzH0lDd+hwKJVfvahSh2UiIhIjYnCVikkClmeIjVKovB+pT8oURCp7HCgixKFmhOsTsCJNWzyoJqaiIjElChs7u7dqiwv6n1w0Z4ojFSiIFLbzW5n4Ig6i/kWeKaE4RsMLFLlZ5uBh9XiREQkpkShM9W/WzizSImCu7cD+ilREEnePsD8dZbxoJnNLFPQwinjallz4jUz+0bNTUREqvBBlZ/bpYyJAtCHaMOilCiI1HCzawQvMdfr/hKGby9q+zVjiFqciIhU6b0qP7etu/cuYaKwfMTt3lWiIFK9rYFl6yxjFuWc8rPWBGuompuIiFTpg/D62pZOBD9clS1RWC3CNhNoZUiXEgWRnzoyhjKeMbNvyxQ0d18bGFhjMjVMzU1ERKphZtOB/1b58f0TTBS6FihReMPMXImCSHU3u0sCW8ZQVBmnRT2sxs+/YmZj1epERKQGI6v83Cruvm7JEoVVI2zzWmt/VKIg8mOHx3RelCpRCMeC7lbjZs+quYmISEKJAsCxCSUKPTJ4HV6A6tcvmt2rShREqjvJulLdo8q2fGZmr5csfIOpfczm82p1IiJSozdq+OzP3b1fAolC7wzGZbWI2+mJgkiV9gJ6xVBOqdYFCGeJOjTCpkoUpFKb6uvu97j7CHfvooiIyGxq+SGuHfDbBBKFeQuSKMwAXlaiIFKdX8ZUTtkWENsMWKrGbcbSynRsUuokYRDwIsGCSQOBlRUVEZnNm4DX8Pl93b3SNWpyxGPI4hOFdSNs86qZTVGiINL2zck6wCoxFNUMPFqy8B0UYZsXW5tlQUp7Hq4OPDbHRbizIiMi3zOzVqfzbEF74PQKfxtfhETB3TsDG0TYtM13BZUoiASOiamcF8q00rC79wS2T6JzktIlCf0J1h7pNsefvlR0RGQOtb4HuKe7D4wxUcja0KMNgSjDNJ9WoiDS9g3KAsCOMRVXtqcJ2xPtF98X1fJktnOwM3BrCxffZqqfM11EyuONGj9vwGkxJgrzZCwem0bcrs13BZUoiMDBQMeYyirb+wl7JtU5SamcR8tD/z5ua/ysiJTSKxG2+bm7bzPHv0UdAbCgu2fpHnrzCNt8ZWYfqimJtMLd27v7px6Pce7eoUSxm8/dZ0SI0ydqeTJbO9qllbZyvSIkIi30G30jXqc/dve5ZitnQB3X/D4ZicUC7t4c4fjvqqZ8PVGQstuKaAuUtGSImc0oUey2J3hJrFavqNlJeIFbHLiylY88oyiJyJzM7GNgdIRNFwfOnO3/f1HHYfTLSDg2IxhaVauq3hVUoiBld0CMZZVt2NH2Ebd7R81Owsf211J5hdPJwB2KlIhU8ELE7Y4Op2HGzMYBk3KeKPw84nZV/RCjREHKfKMyP8Fc7XF5vESx60rwK0YUWj9BAI4nmKmjkovN7AuFSURiThTaAze5e/fw/0f98arhiUL4HbaJsOkMqpxURImClNlgIK53Cj43szL9Ur4l0DXitm+r6ZU+SR8EnN3KR75t4+8iIs/VsW1/4Jrwf78RsYwlMhCDnYg2LeozZlbVkxQlClJm+8ZY1rCSxW7bOrYdqaZX6iRhAeCftP5+y0VmNlbREpFWvFTvTba7/5La12T43tIZiEHUmQcfU/MRaf1mZU2P18Eli9/HEeP0tVpfqc+7Pu7+TlttZLYhASIirfUp79R57Z7h7kdF3Hayu7dr4Hdf3N1nRTz2tavdj54oSFkdEHN5Q0vUMS9BMHNEFHqRubwX9JUJVgFdpo2P/s7MxitiIlKFet8NbA+cQTB5Qq26AAMa+N0HR7yPH0sN73coUZAy3rB0BfaIscjPzOy9EoVw4zq2VaJQznNuW+BJoK15x18FLlPERKRKcQyh6UX0d+4GNqhPbU/0HzyHmNlMJQoile0CxDm0YWjJ4rdJHdu+p+ZXuiThaOBuoFtbHwWONrNZipqI1HD9bW7g/ldp0H53BxaLuO0jtXxYiYKU0f4JdFRlUs8Thc/V/EqTILR394uBC4FqxvHebGbDFDkRqZaZfUNjF/FcowF9qwEn1VGEEgWRVk6w/rQ+d3sUQ0oUv77AQnUUMUqtsBTtpAdwD3BElZuMr/PCJyLl1cgZfNZ097lT3ue2wEoRt/3IzN5XoiBS2WCiLXVeySdm9mGJ4rdandvriULxk4TlgeeBrWrY7GQz+0zRE5EIGrnYaUdgoxT713bAH+so4pFaN1CiIGW6gTFgr5iLHVqyMK5a5/Z6olDsc2xn4Flqm198CHqBWUSiewKY2MD9b57ivvYDVqxj+4eVKIhUNoj4V1IcUrIY1vNEYbyZTVQzLGSC0M7dzwZuBWp5DD8JOMjMXFEUkSjMbApwfwMPYYuU+tluwO/rKGIq8JASBZHK9kqgzNKsbhg+kRlURxEadlTMdtGf4CnCr6l9WN9JJRu6JyLJuLWB+x7g7qunsJ9zgIXr2P6hKD/WKVGQstzMNAE7x1zsR2b2aYnCuAgwTx3bf6mWWLjz6mfAc0CUi+QjwKWKoojE4AEaO/xoryQLd/fVgEPrL
OaeKBspUZCyWBdYNOYyh5YshkvVuf1YNcPCJAid3P0CgvURekco4gvgF2bWrGiKSL3MbBJweyMThXARtCT62y7A1VQ3zXQls5QoiLRu9wTKLFui0L/O7cepGRYiSRgADAeOIdoMYrOAvc1stKIpIjG6toH7XgD4eUJlX0j06VC/90zUPleJgpThxqYdwWrMcSvbi8z1Jgrj1RpzfR6Zux8FvEx9s1/93syGKKIiErMngPcauP//C9/li7Pf/QVwcAxF3RV1QyUKUgYbh9l+nD4o2fsJAEvWub2eKOQ3SViEYLaMi4AudRT1MHCWIioicQuHMv61gYewMrB9jP3uRsCVMRWnREGkFRp2FA89UShnkrAH8Dr1zxU+EtjDzGYpqiKSkGuBrxq4/3PcvXMM/e6K4c19pxiO6Q0z+0CJgkjLJ1sHYKcEin66hOFcrM7tJ6tF5urcmc/dbwJuAnrVWdw4YAcz+06RFZGkmNlUGvvUcgBwep1978oEs8L1iOmY7qpnYyUKUnRbUN+UnpUML9lNYxPQs85iZqo55qa+9wbeAvaIobhZwF5m9o4iKyIpuBR4u4H7P8HdN4nY965N8P5jnMOlb1GiIFJZEsOOxhAMoyiT7tQ3Ndv3N4yS7QRhMXe/D7gemDemYk82s/sVXRFJg5nNBH7TwENoD9zm7svU2P/uTfAkoVeMx/K6mb2uREGk5ZOuI7BDAkU/Y2ZesnDG0XEpUcjuudLk7kcCbwDbxFj0+Wb2V0VYRFL2n7A/a+Q1c6i7r1lF/9vZ3S8j+IFmrpiP46Z6C1CiIEW2McEv4XEbXsJY9lJzKmySsBwwDPgb0C3mC9SJirCIpC38Me/cBh/GgmGycFq4aFpL/e9mwCvAIUl073EkCu3VnKTAdkioXCUK0XRRk8xUgtAD+B1wJNAh5uIfBQZr5WURaaCbgTOBfg08hs7AGcCR7n4dwTo0M8K/7Rr+l5ThZvaxEgWRlm+CjBjnM57NDODFEoZ0biUKhTk3moD9gLOJf30RgOeBncxsuqItIo1iZjPd/TyCp6WNNh9wQsr7vCmOQjT0SIpqNWCRBMp9xczKOM1nHO8XdFWzbHiSMAh4Brg6oSThRWBLM5ugaItIBlwHTCzh955JnbMdKVGQotspoXKfKmk840gUeqhZNixBWNzd7yH4tX+NhHbzLLCpmY1VxEUkC8xsfJgslM2DZva1EgWRyrZL8GZIiUI086hZpp4gdHH3XwOvJnhOQPAy3rbhRVlEJEuuKOF3/ndcBSlRkCLeHPUBVkyo+OElDWsciUJvtc7UzoEmd9+fYL2Ps0n2ac4LwGZm9q0iLyJZY2avEAy5LIvxwB1KFEQq2zKhcj8xs8+UKESmJwrpJAlbE/zCfzWwaMK7GxYmCd8o8iKSYVeV6Lv+O853KZUoSBFtlVC5z5Q4ptOUKGQ+QVjN3R8D7ie5J2qzexDYWsONRCQHbgemluS7XhNnYUoUpGg3S+2BzRIqfniJQxvHL8bzq4Um0uaXdPcbCYYAbZLSbm8DdijpDGAikjPhJAv3l+Crvm1msf6oqURBimZdkhuP/VSJ4zomhjLmc/d51URjSxAGhTMZjQT2BCylXZ8B7KZ1EkQkZ8ow+9GVcReoREGKZouEyp0CvF7iuI4F4rgxXFpNtO4EYSV3v4VgBq7tUkwQZgFHmdnpZuaqCRHJmYfCa1lRzQRuVKIg0rrNEyp3hJnNKGtQzawZ+CSGogaoiUZOEFZz97sIXlTeNeX+exLwczO7WDUhIjm9jk0leFehqO43sy+VKIhUvpHqAayaUPEvKsJ8pEShIe16bXe/L2yDO5DeE4TvfQpsYGb3qDZEJOduLfB3uzqJQpUoSJFsALRTopCYD2IoYxWFsarkoMndt3P3IQQv0W/ToEN5GhhkZi+rVkSkAIYBRZyE4XPgPiUKIq3bNMGyn1d4Y3lHYz1376xQVkwQerr7yWFSdg+wUQMP51xgIzP7SjUjIkVgZlOAxwv41S41s5lKFERat2FC5Y4H3lN4Y0kUOgODFMqfJAjzu/vpBDMYnQP0beDhTAOOMLNfJ3XhERFpoEcK9n1mkNCwI4D2ai9SkButeYCVEir+5fBlXiUK0Ez9PzBsDDypcIK7LwscD/wiTKIa7T1gdzMbodoRkYJ6sGDf524zG5VU4XqiIEWxYYLtWe8n8MOCNW/EUNRWZY5j+P7BVuELym8CB2UkSbgBWE1JgogU/Fo2EviwQF/pH0kWrkRBimLdBMt+QeH9wbAYyljb3ZcpYYKweDi86GPgAYIXlC0DhzYRONDMfmFmE9TERaQEivJU4W1giBIFkSpuPhMsW08U/ueJmMoZXJLkoKO77+ruDxL8gvU7oE+GDvElgqcIV6tpi0iJFOU9hUuTXgDT1FakADdjnYBxQKcEiv8GmE8r0f4Q657AaKBDnUV9DvQt6suy7r4ccCCwLzBvBg+xGfgr8Fszm66WLSIlu5bND+R9RrcJQB8zG5fkTvREQYpg1YSSBICXlCTM9stC8J7CUzEUtQiwT8EuPN3d/QB3f5rg3YPjM5okfARsbmYnKUkQkZJey0YD7+f8a/wz6SRBiYIURZLDjvR+wk/FtULvqe7eIc+BcPe53H0Pd7+T4Nepq4B1Mnq4zcCFwIpm9riasYiU3DM5PvZm4KI0dqREQYogyRszvZ/wU/8GZsVQTj+C4Tl5Sw66uPvO7n4LwTCsm4AdycbMRZW8DaxnZsea2SQ1YRGRXP8QeK+ZpfJERImCFMGaShTSE87X/GhMxZ3j7n1zkBx0cvft3f2GMDm4DdgV6JrxQ58B/AFYxcyeUesVEfnB6zk+9gtSu+arnUieuft84Y1bEsaY2XyKcotxHwxcE1NxTwIbm9msjH3HLsAmYUKwI9AjZ9X0MsG0p6+oxYqI/KSPn4dgwpK8edXMBqa1M63MLHm3SoJlv63wVhTnfPvrE8zAc2wGLhyLAtuG/21K9p8YtGQS8Hvgr0WdVUpEpF5m9q27f04wuUaeXJDmzpQoSN6tmmDZbym8FS0Qc3nHuPsoM/tTyolBe4KX4bcMk4OBOa4TJ3hf4mQz+0xNVESkTe/kLFH4DLhRiYJI9ZJ8oqBEobKlEyjzHHfvAZxqZs0JJgf9gS3C/zYBuhegPl4EjjGz4WqaIiJV+yRnx3tB2tNaK1EQJQqVaehRZWskUKYBpwArufthZvZ5TInBosBGwAZhYtC/QPXwZRiza5NMrkRECuq/OTrW74DL096pEgXJLXfvnvBNn54otBz3biQ75Gs74G13/yNwiZmNr/H4+hG89/B9ctC/gNUwnWCc6h9qjY+IiOQyUbjEzCakvVPNeiR5vmFdE3g2oeLHAb20KnOLcd8duDml3Y0FrgNuBZ6d8+Vcd58LWJ3gPYO1wv8WKHD4mwnWsTgtrTm0RUQKfD3bAngoB4c6BegbriidKj1RkDxbNsGy31aSUNHOKe6rJ3B0+N8kdx8ZJg+zgHmBFUrSjzlwJ/A7M3tDTVBEJBZjc3Kc1zQiSVCiIEoUKntT4W3hbjUY7//zBu1+LpJ9
JyWLmoEbgLPNTO/MiIjE67scHOMM4JxG7VyJguTZgATLHqnwtugg9RupeZRgBqhnFQoRkUTk4YnC7Wb2qRIFkWwlCp8qvD8Wvg9wqCKRuIcJXlIeplCIiCRqUtYvvcC5jTwAJQqS15vWTiQ7m40WrPqpk4EFFYZEzCRYLO1cM9OwNxERAbjTzF5RoiBSu35AuwTL/1wh/lFitiBwnCIRuxkE7yCca2bvKBwiIqlqyvKlF/hjow9CiYLk1eIJn5xKFH7sLGBuhSE2k4CrgL+a2ScKh4hIQ2R5mYB7zewlJQoi0SyWYNlfm9k0hTjMmty3Bg5QJGLxEfB34CozG6twiIg0VIcMH9slWTgIJQqSV0k+UdCLzP9LEhYErkWLM9ZjFnAHcJGZPaVwiIhkxnwZPa6JwBAlCiLRJflEYZTC+4O/ZbgjzbqJwI3A37RImoiIEoUaPJGVkQ1KFESJwk/p/QTA3U8EdlEkavYScDlwk5lNUDhERJQo1Oj+rByIEgVRovBT45Uk+E40eO7mnJlAML3p5Vl4+UxERKoyf0aP6wElCiL1WSDBsieWPEnYOLzpbVIza7Od3AncCjysF+BFRHKnTwaP6U0z+0iJgkj0G9kuQFclConEdskwSeioltaiZuBJgncPbjOzbxUSERElCjF6KEsHo0RB8qh3wuWXMlFw9wHAYyT7tCavRoTJwc1mplW7RUSKoW8Gj+leJQoi9ZlXiULsScKKwCNKEn7wLfBC2GHfl6XHwCIiEptlM3Y8YwieWitREKmDnijEmySsCjycQlyzYjTwHjASeD/8318C34Sd9DdmNkunmYhIoa9985G9WY/+Y2YzlSiIZDtRmFKijnJjgsXAehb0K34KPAu8TDB8aISZjdYpJCJSestm8JjuzNoBKVGQPOqRcPkdyhBEdz8KOK9g/cA4gmnlHgeGmNn7Ol1ERKQFAzN2PBOAR5UoiNSvc87Lb3SC0Am4BDigIF9pAsFTkduARzRNqYiIVGG1jB3P/WY2VYmCiBKFRiYJC4U31WsV4Ou8AlwK3KgVkEVEpEarZux47shikJQoiBKFkiQK7r458E9g4Zx/lUeBc8zsMZ0KIiIS4XrYFRiQoUOaSoZWY1aiIEoUWtepYB3iXMCfgMMBy/FXeRU42syG6RQQEZE6DMzYPfAjWX0yrkRBlCj8VI+iBMrd1yV4irBkjr/GZOAU4GJNWyoiIjFYP2PHc0dWA6VEQfIo6VmJFi9AgtAJ+D1wPNAux1/lZWA3M/tAzV5ERAqYKEwH7s5qoJpSuGHZ3N2vDedrF4lD0ouR5DpRcPcNgJeAX+U8SbgDWF9JgoiIxHiNbALWzdAhPWpm32U1Xu0TroxVgP8QDBXZzd0HmdkbaqYSQ/atROGn59vCwJ+BPcn3uwgANwP7ZG2FShERyb2VyNYio7dmOVhJP1H4G/8bT94ZuEjtU5QoxJ4gdHH33wDvAHsVIEm4A/iFkgQREUmAhh1lIVFw97X56aOdjd19kIPUF+gAACAASURBVNqoZDxRmNvd++UgQWhy98HAu8AfgW4FqNuRwGC9tCwiIgnZKEPH8kiWhx0lmigA+1f4933VRiXjiQLABhlOEMzddwBGANcAfQpSr5PCJEGLp4mISBLXzw7AZhk6pFuzHrOmhCqiPbBrhT/vEVaUSFTTUtjH+ln84u6+HfACcBfBOMuimARsa2bPqHmLiEhC1gG6Z+RYMj/sKLFEgWDIUaUXReYF1lBblTp8m8I+MvNEIRxitLO7Pw/cA6xWsPqcDPzMzJ5Q0xYRkQRtlaFjedjMxpY1UfhZG3/fQm1V6vB1CvtYyt1Xb3CC0MvdTwc+B24Divh+zwRgczMbomYtIiIJ2zZDx3JzHgKW1PSom7bx9/XVVqUO36S0nwOBFxuQICwC/BI4FOhd4HqcBOxkZsPVpEVEJOFr68LAChk5nGnAvXmIW1MCFdENWLGNj/VQk5U6jElpP3u5+9wpdmJruvuNwEfAKQVPEr4ENjSzR9WcRUQkBVuRnenDHzazcaVMFAiGR7RrwH6lPL5OaT/dgd8knBzM7e4HuvuzwLMEi6UV/WX/t4C1zOwlNWUREUnJzzJ0LLfmJWhJ3LCv1aD9SkmY2URgSkq7O9Hdl00gQVjD3S8HRgFXAmuWpPqGAuuZ2X/VkkVEJA3haJesvMg8DfiPEgUlCpKsD1PaT0fgX3EMQXL3xdz9ZHd/HXgOOJhiLJJWreuALbO+uIyIiBTOz4DOGTmWh/Iy7AiSeZm5mqlP51ablTq9Byyf0r5WA2539+3NrKY1HNy9N7AbwZCi9cjO+Mg0TQWONbPL1GxFRKQBds3Qsdyap8DFmiiEs7UsUMVHF3J3MzNX25U6EoU0bQE84+57mtm7VSQHOwK7EMwA1qHk9bSbmb2iJisiImnL2LCjqeRo2FHsiQLQv8rPdQLmIb1pLkWJQhxWAV5z91uAa4CnzGx62BEtSPBoc1dgY5KbejhPbgEONrPxCoWIiDRI1oYd5eqaGPfNzBI1fHZhJQpSh5EN2m9H4Bfhf1PdfQrBEwMNp/ufScCJZnapQiEiIg22S4aO5da8BS/ul4r71fDZhdR2pQ6fZuAYOgO9lCT8yDBgZSUJIiLSaOGwo60zcji5G3aURKLQP6HPisypt0KQKRMIVpLeyMw+UDhERCQDdic7w47uM7MJeQtg3EOP+tbw2WXUfqUOeiKVHcOB/c1spEIhIiIZsm+GjuWOPAYw7icKC9bw2QFqv6JEIdfGAIcA6ytJEBGRLHH3pQimJc+CKcA9ShSgpxIFScnCCkHDzAIuAZYxsyvMrFkhERFJ9Ka3KRxvL9UbTHbWLronj8OOYk0U3N1qTBQWc/e51I4lokUUgoYYDgwysyPM7FuFQ0Qk0QShi7ufBnwCjHX3vygqVcWtHdkadnRzXmMZ5xOF7kC7Gj5vwIpqzhLR4gpBqj4G9gLWM7MRCoeISOI3u4OAN4EzCH4cawKOd/fFFJ02bQ4smpFjGQvcr0Qh2lvlq6ktS0T9FILUOriTgWXN7Catpi4ikkqScCDwZAvXOgOWVYTadECGjuUOM5uW10DGOetRuwjbKFGQKB1oO6CPIpGoGcClwJlmNkbhEBFJ7Rp3KnBmKx/prii1Gr/ewPYZOqSb8hxPJQqSRwsTrJAsySQI1wF/MLOPFA4RkdRucA04Dzi2jY9+rWi1am+gU0aO5UtgiBKF6InCcu7excymqF1LDTTsSAmCiEjRnF1FkvABoPfEKidbTcCRGTqkW8xsVp5jGuc7ClGmoGoPrKGmLTVaSiGIzVTgCoKpTg9SkiAi0pAb3FMI3gdrzSRgJzMbp4hVtE3G7hFuyntA40wUoj4VWF/tWmqkVb3rNwb4PdDXzA5RgiAi0rAk4TDgD1V89FAze00Ra9VxGTqWD4Hn8h7QOIceTYq43QZq16JEITUjgfOBazXkT0Sk4UnCNsDfqvjoPWZ2gyLWaixXAjbJ0CEVYqbAOBOFyRG3W9fdO5rZdDVzqZKmhqvNFOBfwOVm9pLCISKSiRvbtYHbqrgXGws
cqoi16cSMHc/1RQhqbImCmc1y96nUvp5CV2Bl4AW1camiY+0A9FUkqjIKuDJMED5XOEREMnMtWzRMErpU8fGzzOwLRa3VeC4A7JahQ3rNzN5RovBTk4i28Nr6ShSkSksAHRSGyv0lwVRs/wDuNrMZComISOacQzDVd1s+prqhSWV3GNmZEhXg5qIEtinm8kZH3E4vNEu1BigEFc+9CwhWUN7UzG5TkiAikj3uviywR5Uf/52GZrcZz07A4Vk6JAow29H34n6iMIpo48fXd/cmM2tWk5c2rKwQ/GAScBdwA/CImc1USEREMu93VLf21BsUZJx7wvYEFsjQ8TxjZh8rUaicKETRG1gVeFHtXZQotGom8DBwI3CXmU1SkxARyQd3XwHYtcqPn6EfUNuMZxPZe4n5piLFOCuJAsCWShSkCgNL+J1nAEOBO4HbzWy0moGISC6dTnXDvj8I+3xp3S7A8hk6npnArUoUKqvnrfwtqW7BESnvLwc9gH4l+bqTCZ4c3Ekwf/Z3agEiIrm+hg0Edqry4xeY2SxFrdV4NgGnZeywHjezr5QoVPZZHduu5e7dzGyCmr9UsBJgBf1u04BnCWYsegx4Xi+wiYgUymlVXsO+Ba5RuNq0M9l6mgAFG3aURKIwso5tOwAbA/9R25cKijbsaAxwN8FTgyFmNllVLCJSPOG7CTtW+fEr9f5Zm/FsAk7N2GFNpYDDxZJIFGbWUe7mShSkFUV4kXkGcA9wFfCQHi2LiJTCKVT3NKEZuEzhatNOwIoZO6b7zWycEoVWmNk0d/8AWKaOREGkklVzfOxfAZcAlxVt/KKIiFTm7ktT/arBD5vZh4paq/E0svc0AQo47AjiX3AN4K06tl3G3fvrNJAWOoa5M/jrQTU+BQ4FFjezM5UkiIiUzm+obt0EgEsVrjb9nOCdxSwZD9yrRCH5RAGqn19YymVN4h8ql6SxwAnA0mZ2uZlNUxWKiJSLu/cF9q7y458V9WYzxnga2ZvpCOBOM5uqRKE6b9e5/W46FaQF6+SlHyOYrWIZMzuvqB2HiIhU5WSCyVqqcYPeW2vTjmTzfcWbihrw2KeadPcBMSQLS5vZezofZLZ29SDBWhtZ9iUw2MweUo2JiJT+urUIwcJpnarcZHkze0uRqxjPJmAE2Rt2NBpYxMxmFjHuSTxReBf4us4ydtcpIXN0Dmtl/DD/A6ykJEFEREIn1pAkvKAkoU37ZDBJALi+qElCIomCmTnwdJ3FDA5vDkUAVgB6ZPTYpgKHm9kOZva1qkpERNx9fuCQGja5TlFrNZ5dgN9n8NCaCWY0LKykbsafrHP7/sC2OjUktH5Gj+trYAMz0ywVIiIyu18BXav87EzgZoWsVScAfTJ4XP8xsw+UKNRueAxlHKzzQkJZHHY0CtjUzF5Q9YiIyPfcvRe1PU140szGKHIV49k7TBSy6B9Fj39SicLLwOQ6y9jG3RfXKVL6DsKATTN2WO8C65jZ66ohERGZw9FA9xo+f5tC1qrfAj0zeFxvA48oUYjAzKYDz9dZTDvgSJ0fpbcKsFCGjucTgicJ/1XViIjI7Ny9W5goVKsZuEORqxjPAcARGT28v4fv5SpRiCiO2V8ODR/hSXltk6Fj+RbYysw+V7WIiEgLjgDmqeHzT5nZlwpbRedR/ToUaRoLXFuGCkgyUbgzhjK6AcfqPCm1rLzUPhHY0szeVpWIiMicwqcJJ9a42b8VuYrx3BHYOqOHd6GZTSxDPVjClTwSWKrOYsYAfc1skk6b0nUS8xAsZNIuA4ezt5ndqFoREZEK16yjgItq2KQZ6GNmoxS9n8SyI/A6sHQGD28y0M/MRpehLpJeq+A/MZQxL5oBqay2zEiScLWSBBERaeXGtj1wXI2bDVeSUNHRGU0Svr8nGF2Wikg6Ubg7pnJOcffuOm9KJwuPHN+mthfTRESkfHYB+tW4jWY7ajnpWhg4NaOHNwP4S5nqI+lEYTjB0KF6zQecrNOnVB1FE7BVBjqEPTXsTURE2vCrCNvcrbC16AJqm142TTeXbdbDRBMFM5sF3BdTcce6+6I6f0pjzTBBbKQLzexVVYWIiFTi7psBq9a42Ztm9rGi95NYbgXsmtHDawbOLVudNKWwj9tjKqcrcKZOo9LYrcH7H6X2JiIiVYjyNOF+he0nSUIX4OIMH+KtZvamair+iu/g7qM9HrPcfQ1FtfBtpsndP/fG2lM1ISIibVyvBrp7c4RrzEaK3k9ieZZn10x3X7aM9ZL4EwUzmwHcFOPxXhyOX5fiWh9YuIH7HwHcrGqQmC+CXRQFkcI5kdqnmh8HPK3Q/ah/XJZoT2bSclNZ11FK64b7uhjLGoSmSy26Rv+af3oZlmWX1C6AXd39H8CLioZIoc7txYHdI2z6cPgjqvDD5CVXAB0zeogz0VDkVBrCiBgfAY1x996KamFvqsY28PHii+5uqgmJqT0PdPe3ZmtfCykqIoU5vy+IeJ0ZrOj9KI5HebZdrVpKpyEcEnPF/UtRLWQ72bfBHcIeqgWJoR2bux/n7lPnaF/zKDoihTjHe7r7hIjvWi6oCP4Qx74R45iWae7eTzWVTmOYO4FfindUZAvXToY1sEP4Klw2XqSeNryguz/YQvv6QtERKcx5/quI15kXFL0f/aDyUMafJlxY9npK7aVgM5tIvO8qAFzq7vPqdCtMp7E0sF4DD+EqM5uumpA62vC2wKvAli38+V5FSKQQ53l74MiImz+iCP5gMLBFho9vHPB7JQrp+hvBghVxWQC4ROdaoTqNRr4f8E9VgUS8cejs7hcB9wDzV/iYZtISKYadgcUibvuowgfuvjDw14wf5rlmNka1lX7juD2BR0P7KLK5bxft3P3TBj5eHKFakIhtd4C7v9pG+3pTL8mLFOacfybidWayu3dW/HIx5Ogzd++q1p7+EwWAvyRQ5iXuvpSqM9c2BRZt4P5vURVIhAvePsALwEptfPQCTbkrUohzfi1grYibP2VmUxVFjiDbQ44ATjOzyaqqxp1oTyaQ/b3k7p0U3dy2iRsa/OvBiqoFqaG9dnX3q6tsWx/qJXmRwpz7/67jOnOy4ucD3H1Sxp8mvObu7dTaG9tQtkyocs9XdHPZHnqEj2QbZZSGhUgN7XX5cChRtfZX1EQKce4v5u4z6rjWrFby+HVw9xc8+zZWa/+fRgw9wsweAoYnUPQx7r6zqjV3dgO6NHD/QzUsRKq80B0IPA8sV+UmLxP/bG8i0hhHAu0jbvsNUPZ34U4FVs/4Md5uZkPU1LNxwd08oUxwvLsvpwjnqi083eBfD05QLUgbbXRud78+wsJKayh6IoXpA76r4zpzR8njt26dT2PSMNnd+6q1/1j7GirZgN0J5rkfBzwJDDGzaVF2bGaPuPtTxD9vfjfgDndf08zGqYoz33ksDazd4MN4WTUhrbTRlYF/A8vUuOmVZva8IihSCIOBnnVs/2SJ+9BewI1EfxqTlj+b2cdq6tEreu8Wsq9x4Uuoa0Ysc70EM8O73L1JNZf5dnVWg39BaHb3nqoJqdA+D3T3KRHa1Wh3n0cRFC
lEP2Du/lad15rVShy/23PwXsInmg61/oo+rY0gP+7u60Yo9+4EK/4c1Vym21RTeHI20ijVhLTQNju5+2V1tKsDFEWRwvQHG9d5nRlX1ll03P1wz4dd1NLrr+x9qhyTe767d6mh3OXcfWaClX+gai+zbWqzDHQOT6kmZI52uUgdCyq5uw/TLFoiheoT/l3ndeb+ksZtxQbPaFit+9TKK6tlaM79wMwqyjsWGFLtY3czewv4Z4Lf8R/uvqmqOpMGZ+AYXlM1yGwXtg2Al4i+oNJEYH/NoiVSmD5hQeDndRYzrIRx6wrcTGNnNKzGZIIF4KTeRMHMvgGqfWt/TeAJd1+oys//luAF6SR0AG7TglqZ60S6x9D5xuFV1YaEbfIY4DFggTqKOcHMPlA0RQrjwPA+oh5lfJH5YqqfRrqRztALzPFeSAeGL39W6313X6zai3TCj5a+cPclVYuZaUsHZeSR41qqjdK3xa4Rpj5t8fG1hhyJFKpvaOfuH8cw5WanksXtwJy8l/Cau3dQS4+/AdxWY0W8V82TBXdv7+6vJtwoPnb3PqrFTLSjJzPQScxy97lUG6Vuh0u4+ysxtKUxNTxBFZF89A8/i6FvGFqymA3MyXsJs9x9HbXytkWZPvRYgnG41VoSGNbWRdTMZhI84mtO8PsuTvD+xMKq+oZ2JAOIf/2MKEaa2STVSGnb4c7AK8DKMRR3oJl9oaiKFEocY9efKFGfOi/wH7L/XgLA+WY2XE08gUTBzD4D/lTjZksC97Y1X72ZvQhclfB37g/cp7nzG2rvjByHXmQuZ4Jg7v5/wC0ECzTW6yYzu1uRFSlUP9EX2DyGop4uS78KXAnkYdTGR8Dv1MqTbRCdIy4+8lRbU6e6e4+U5tYfrmEnDWk7TTGM+YzLKaqR0rW/bu5+Z4xt6EP96CBSyL7ijzH0DzPcvVtJ4nVSTt5LaNZMmOk1ikHhSVCru929fRtlb51Sg3mkbC8ZZaDdbJqhDmNb1Uip2l7f8OW1uExz90GKrEjh+ooO7v5VDH3EiyW6rs/ISaJwmVp4bZqibmhmLwBRVj7eHri0tdlBzOwBkl1b4XubAbcqWUjVfhk6lpdUHaW58K8HPAfEOU3yr8N+UESKZWtg/hjKKfy0qO7en2AYZ/scHO6nwK/UvNNtIB3rmDHkrDbK7pnSECR394fCxUEk2fbSzd0nZuRXhf+qRkrT7ga7+9SY28/dmgpVpLB9xq0x9RM7FTxO3d39zRwNOdpSrbsxDWWlOi7Cv2yj7I3DKazS8ERZxhI2sK0ckKFO41bVSOHbW5O7/ymJJLPaledFJHf9Rk93nxLTjen8BY5Tk7vf4/lxiVp3YxvM8XXMY7tjG2WflWJDetnd51ONJtZOns5Qp3GCaqTQba2Xuz+cQLuZ5u6rK8Iihe07Doupr3iz4HE6L0dJwhvu3lmtu7ENpl04i1AU49x9xVbK7hTTgkjVGqFkIZE20t9rW9U7aRuqVgrb1vq5++sJtZuTFGGRQvcfw2LqKy4vcIz2ytj1vDUztbBafZriKMTMZhG8pDo5wubdgXvcfYEKZU8jmHc/rYWxBgJDtShb7PYFsjKmexZ6kbmoF7DvX1peIYHi7wb+rCiLFPdHBuJbDHRIQWO0JsF6CXl5R+vPWlgtWw3oyDqyvmdbW2PB3fdOOQt9z90XV63G0i7M3T/K0C8MWmitmO1s/3BoUBLedfceirJIofuQU2PqL2Z5sEpx4RKpmKaNTXM4eUe17Ow1pPvqqNR73b1dK2X/NeVGNsbd11et1t0mNs5Y53G5aqVQ7auDu1+WYHsZ6+5LKdIihf9B6/2Y+oznChif+d39gxwlCd+4+2Jq2fVrSqDMw4BxEbfdFji9lb//Bngmxfj0Bh5w9+3VVOqyR8aO53lVSWEuXt2BO4FDktoFcIiZvadoixTaGkD/mMp6tGD9bEfgJmCJHB32EWb2iZp1dhtVPdNgNrv7Pq2UvYi7f9mAl2GOVs1Gagud3P3bjP3SMEA1U4i2tViCLy1/7y+KtEgp+pOLY+w3NihYbK7xfNHqyzlpWLfVUcnT3X2zVspe1d0nNaDxXeXuHVS7NbWDXTPWgYxSrRSiXa0TDg1M0v3u3qRoixS+P5k/xnuKse7evkCx+X3OkoQX3b2TWnV8krwIHgN8F3HbDsBNHiwN/hNm9jJwVAPidQBwm7vPpaZTtawNO3pWVZL7C9fmwAMEQwOT8hkw2MyaFXGRwjsC6BpTWU+Y2cyC9LV7A/+Xo0OeCOwdzpYpWU8UzOxz4Ng6ipgXuNfde1Uo/2rgggbEbHvgCU2fWlUn04vgvZMseUo1k+s2tR9wH8G0ykmZCuxsZqMVcZHC9yk9iPeHx4cLEpetgGvIzzSoAIeb2btq1flrbHfX+Rjp4UqP8dy9fUKrr1bji3DOdqlc9wdn8LHkGqqZ3LanU1Na5GcfRVukNP3KGTH3H0sWICZrufvEnA05ukqtOb8NbqFwmqp6nNdK+fM0cI7+6e5+pGq5Yt0MzVhHMlHvmOSyHbV39ytSaiN6eVmkPH1Lb3cfF2P/8UEBYrJ8DPdsaXvd3buqRScj8Rf1zOwL6n+sd5y771uh/G+BXYApDYhfB+Bv7n5ta4vFlbQDXgzI2hoUz5rZDNVOrtpRF4LpTw9KYXcPAScr6iKlcRLxDmN8IOf97eLAg8A8OTrsScDuZjZZzTmniUJ4M39jeLGvx2WVho2Y2UskN496NfYFnnL3vmpSP9grrfZVgydVLbm6aPUMb963S2F3I4E9zWyWIi9Siv5lQSDuEQF35Tge84X97aI5O/TDzOwttehinJQLuPvXdT5e+szdF2plH39t8OOvMeGMLKpv9zcy+HhSq2zn6CLu7iNSahdj3X1ZRV2kVH3MhQmsBNwhp7Ho5u4veP5oqGgBT8zdYmgYwyvNkevu7dz9wQY33Jnhy1HtSlzPAzPYoUwMV5eU7Leffu7+XkrtYpa7b6uoi5Sqj+nj7lNi7kuuzWks5nL3YTlMEh4s831WmlIdGmJmtwC31lnM2sDfK5Q/i+B9hRcbGNN2wGnA4+6+aEnb1S8yeExPmtl0nfKZv2itCDwNpDVzyClmdp8iL1IqvwU6x1zmnXkLgrt3Jhgulben7RoqWvAbgZ7u/mkM2eQJreyjt7u/mYGMd5y771my+m3v7l9l8NeH43X2Zb7tbO7uE1JsE39X1EVK18+sED75j9P48KY7V0mCuz+awycJX7t7P7Xk9KT+sqmZjSVYtbleZ7v7RhX28Q3BC5BfNDi+3YEb3P2CEi0pvhEwfwaPa6hO90xftDYl+EVu7pR2+QRwnCIvUjpnETz5j9OjZjY1R/1te+BGYNOc1V0zMNjMPlIzLseNwY0xZZZ9W9nHyuGLilnwkrsvXYJ6vSaDv0B84+5NOusy22a2TmC8cGtGuntvRV6kdH3NBmVfpDF8l/NGz6dfqxWX64TtHa5uXK9X3H2uVvazUco3Ia2Z5O5HubsVtE67xLx4TVxu0xmX2Tazg7tPTbEtfOvuyyjyIqXra8zdn09o4dVeOYlBU
0Z/zKvGjUW9d8q6hv3KGg4POiyGolYGrqnUgMxsKLArkIUXWbsCFwGPhguSFc32xLt4TVwe0ameyYvWrgSTG6Q1LG8GsLOZvavoi5TOHsCgBModambf5SFJAC4HBuew7l4CDjQzVzMu583Cv9J4JOXuO4aZf1aMdffBBavLezL6S8RiOtMy11b2TuCFwrYcqMiLlLK/mcvd/5tQvzI4B9+/XY6fJIxy9z5qxeU+gXu7+5cxrV+wWRv72sXdZ2TsJLjbgxUi816P82YsEfveGzrLMtdWDgjXL0jTnxR5kdL2OX9IcDhxtxwkCdflNEmY4O6rqgU3VsNf8AyHIMWxjHo74KbWfj02s9uAvQmGIGTF9sCb7r5vztvSjkAWV6W8X6d5pi5aBwFXptz33AXoJTiRcvY5SwInJFT83WY2IcPfvT3wL2CfHFbdTGB3M3tZrVi+b9BxvYX/qrt3bWNfW7n75Axmz0/mdWYkd38ko79IbKCzKzNt5DB3b065/p/J2/zmIhJrv5PkWgFbZvh7d3L3e3P6JKHZS7YGlVTXqHt7fAt13VDF/jYIF0nJmsnufrLnaGlyd5+vAePNq/Fd+IuKNL6NHNSAJOHjIgzrE5HI/c7OCY+db5fR793Z3e/3/PqdWm92ZGZu+XAI0q9iKm4vdz+4jf0NA3YAJmWsTroA5wBD3H2lnLSj7Yh/AZs4PG5mM3WaN/yitQNwCZDm1HaTgJ3M7EvVgEgp+50uQJLvJt1iZrMy+L3nI1i8cuucVt0NwJlqwdJaI78jxrmN169if6vG+CQjicdv14Unfpbr7MGMxm9/nVENbxu7NeBp0wx330rRFyl13/OXhPuZVTL4nbf3YCHavHrQ3Tuo9UpbDb2Px7do13+rucl29+Xd/bMMnzxfuPs+WVxsxN17uvu0jCZZi+iMamjb2LRB7wKdpOiLlLrvWTHhWfjeztj37eTuf23A8M44vek5WbiubJqydkBm9ilwckzFLUYwE1K7Nvb5JrAe8F5G62lB4DrgiQwOR9oB6JjBmL1mZp/rFG/YhWtt4G6CoXRpusrMNBWqSHn7nibgMpKdhe/qDH3fZYFngONJd3hnnL4AtsnDwnVKFLLjMmBoTGVtCvyhigTlY2B9YESG62t94CV3v8Dd58nIMe2c0VhpWtTGXbiWAe4B5kp51w8Dh6sGRErtMGDtBMufBlyTkb72UOBFYJUc19dYYGsz+6+artR6AvQPFzOJaxjKTlXut3uGx9zPOaPPb9qaCjbhOuocYx3FbX2dRQ1pEwu6+4cNqO8R7t5dNSBS6v5nYXcfm3Bfc30Gvmdvd7/T829i+PRZMiyrTxQwsw+A0+IqDvinuw+oYr/jCWbxuTTjddcT+CPwvrv/0t0bMfxnQ6BrBmPzHcGjWEn34tUNuA/ol/KuPwG2Dc9dESmvi4AeCe+jofcG7r4J8CrBIqd5Ng3Y0cx0rVaiUJfzgeExldUNuMfde1aRLMw0s8OBg8nWKs4tWQj4O/Chux/j7p1S3Pe2GY3Jg5oWNfWLVyeC4Uarprzr0cAmZjZKtSBS6j5ob5IfCjvCzJ5q0Peby90vAx4F8j5RxzTgZ2b2qFquEoW6mFkzcCTBUt5xWBK4otrZg8zsSmA3srfWQksWAS4ARrj7L1KaYmyLjMbiYZ3aqV7AjOC9og0bcLHZLXz6KCLl7YMWCK9/Sbu6Qd9vBeBp4BDy+8Ly95qBg83sEbVcifMk+VPM4+J+VeP+V8v49Kkt+cTdjw+HgyRRJ0tk9HvPcPd5ddakIXRb2QAAIABJREFUen6e1YB6nuXueyj6IhLj+kttjafvkfL3Mnc/1t2neHEcoRYrSZwsXd39g5hvJjep8RgWcvfhOTwpv3P3i9197bjWYXD3duFCcFn0uM6YVM/NPRo0d/eJir6IuPveKfU5l6X8vRZx90e8WH6jFitJnjRbxNxgR7t7nxqPoZO7X53jk/R9d/+9uw8K55qOUg/93f0+/Voh7j6wQbNeXaToi0g4y9E3KT3BHJDi99o1pe+VpnPUYiWNkyfuX7Gfi/Lyb/jS8Iycn7Rfuvs17n6gu6/Q2qJ07t4+fCJxqWdzFebZO3OtxpzOuTifu3/cgDq+o60FFEWkNP3QvSn1O3en9H26u/u1XjznqbXml+WsU5gXeBuIcwz65WZ2aIRj2RT4N9C7IG1hCvAp8DkwJvy3jgQvSS9HNqdBndMzZraOTuvEz8P2BC+Mb5zyrocDm5nZFNWCSOn7oQOBK1Pa3QZm9mTC32c94F9A34JV1QVmdpxarBKFNDuHfYDrYi72QDO7OsKxLAHcSvpTQkrLfmVmf1EYEj8HLwKOSnm3I4F1zWyMakCk9H3QMsBLpLP6+3NmtlaC36UDcDpwMlC0p6UXAceamavVKlFIu5N4GNg8xiKnAuuZ2UsRjqVzeDIcrObUcP3N7EOFIdFzb3/SnyJwNLC26lZEwuHCzwCrpLTLXc3stoS+ywDgemC1AlbV34BjlCTkX1NOj/swYHKM5XUGbo8yraaZTTWzQ4D9Yj4mqc0I3UgmfoHegWC9hDRNBLZT3YpI6NwUk4QPgDsT6EvN3X9J8FSkiEnCxUoSlCg0VHjTcEbMxS4OXB91NiAzuw5YE3hXzaoh7lAIEk0StgVuATqkuNvpwM5m9oJqQETCfujoFHd5jpnNivk7LAjcC/ydfLz7V6tLgKOVJBRHblf4C8f1vQysEHPRp5vZGXUcVzfgKmBXNa9ULW9mbykMiZxrm4UXtk5p7hb4hZndqBoQEXdfGHgFmC+lXX4ALGtmM2L8DjsAV6T4HRqRJBypJKFY8jr0iPDkPTy8oYjTae6+RR3HNcHMdgOOJJhJSJL3jpKExC7OaxA8eu+U8q5PUJIgImE/1EQwI1CaN9hnxpUkuPvc7n4FcFeBk4TzlCQoUchisvAUcE0CMbmh1sXYWji2vwODgNfVzBKnYUfJXJyXA+4H5k55138ys/NVAyISOgXYJMX9vQPcEFM/uiYwAjiowPVzupmdoCShmCzvXyB8Afkd4l/P4FlgQzObXufxdQb+RPCEwdTkErGamb2sMMR6Xi0cngN9Ut71dcBgXXBEJOyLtiYY+pjmD5t7mtnNdR53e+C3wP8B7YtaPQRPf/XDjhKFzHckSS28cpGZHRPTMW5D8PRjfjW7WL1nZksrDLGeT3MBT5D+bBz3AzvGOSZYRHLdF/UHXgB6pbjb14GBZtZcx3EvTTBUao0CV88s4DAzu1IttdiaCvI9rgYeT6Dco919v1gyMrP7gWUIVnOW+FyvEMR6YW4P3N6AJGEYwQxHShJEBHfvAdyXcpIAwbSezRGPucndTwZeLXiSMJXgRx0lCSVQmKEw7r4iwSxIcT/imwCsYWbvxHScRjC929lAFzXBug0wM01JG995dAFwTMq7fYdgwcNvVAMiEl4nbwJ2T3nXD5nZVhGPeUGCGQ+3KXj1TCVYhO5etdRyKMoTBczsdeDPCRTdDfiPu3eP6TjdzC4ElgOGqgnW5XklCbFenA9rQJLwMbCpkgQRmc0pDUgSZkTt
/9z9EIIfPIqeJIwGNlCSoEQhz/4IfJpAuUsRLI4SZ2LzMbA5cGrYQUntNIwrviRhEJD2C2nfAT8zs1GqAREJ+6LNiH9B1WpcXesPT+7e3d2vIVixvkfBq+YzYBMtgFk+hZuFx91/TnLTZf7SzP6RwDGvAlwLrKgmWbVmoI9uMmNpf/MDL5LuDEdTgC3N7EnVgIiEfVF/gtnW5k151+OBpc3sqxqOdX2CWdr6lqBq3gS2MrPP1ErLp2hPFDCzOwlegErC+eGcyHEf8wiCNRfOJZhJQNo2VElCLBfm9gRPZtKeBnV/JQkiMltfNC/BzGfzNmD3p1SbJLh7R3c/h2DocBmShCeB9ZUkKFEomuOA6QmU2wm4J5z6LO5kYZqZ/RrYAHhfTbNNWrU3HmcDG6W8z/PNTMPGROT7m+8uwN1AI6a6fhb4R5XHuSLwHHByge+fZnc7sIWZfadWqkShUMzsPeDChIqfD3jI3fsldOzDgZUIFmmbqSbaomlhByb1XZy3BE5IebfDgJMUfREJ+6EmgjUH1mnA7mcAh7Q1Haq7t3P3XxOs6TCwJFVzAbC7mU1VKy23wq4UHM5SNBJYIKFdfAZsHte0qRW+w0DgCmB1NdUfudPMdlIY6mpb8wOvJXh+tGQUwSraX6oGRCTsi84Hjm3Q7v9oZv/XxvEtA/wTWKskVTITOMrMLlXrFCjwozMzG08wxVpSFgWeCG/mk/oOr4Sd0wnAJDXXH2jYUX0XZgsvfGkmCTOA3ZQkiMhsfdGxDUwS3gDObOXYmtz9GGBEiZKEscA2ShLkR/eiBe+EmoDnSXaV2e+A7cIhQ0l+l34E4yi3LHmb/RZY2Mym6fSN3JaOA85Lebe/MrO/KPoiEvZDOwO30JgfLKcDa4Y/xlW63l4DbFiiKvmAYLrqt9U6ZXaFfhknHHd4DOAJ7qYX8Li7H5jwd/koXDFyH+DrErfZG5Uk1HVxXpngBeY0PUv6azSISHb7oY0I3kto1D3I6S0lCe5u7n4owbDMMiUJw4C1lCRIi/efJemUbgD2SmFXlwLHmNn0hL/PvASrUO9Xljqczepm9pJO3Ujtpn14075airudBqxqZm+pBkQkXH/gAWCuBh3Ck8DGZjZrjuNaFLiS8j21vwQ41sy08Ku0qKkk3/Nk0hnjfxgwxN0XTjS7MxtjZvsTTKX6Wona62tKEupybMpJAsCZShJEJLwZX5dgnaNGJQmjgT1bSBL2A14vWZIwDTjYzI5QkiClTxTChULOTWl36wAjwhWik/5eT4U3fscRrCxZdNfolI18gV4COCPl3Y4gmOZXRNQHrUWwoFq3Bh3CLGAvM/t8tmNa3N0fIJjcoWeJquMLYBMzu1ItU9q81yxRJ9UFeBtYPMXd/gs42szGpvD9FgL+AuxZ0HqdDixqZl/rtK25bRjwCLBpmrsF1jCzF1UDIqXvg9YAHgZ6NPAwTjGzs8PjaQKOBP4AzF2y6ngO2MnMRqllSjXKMvQIM5sCHEqyLzbPaR/g9XBhq6S/3xdmtjewCVDEoR73KkmIbHDKSQLAbUoSRMTdVwceanCScD1wTng8yxG8p3BhCZOEa4ANlSSItN5pXejpa3b3fyX97sJs37GDu5/o7hO8OLZT643UFnq5+zcp19WMcJEiESl3/7Oqu3/b4GvHU+7eObwunuruU718prn7kWqREoWVsOPqArwILNeA3U8keNR5fhpTfIazOFwObJ3zavvs/9m77zg7qvr/469PCiQhEAIkIZTQe+8dEVAURcFCkyJd8WsHRbCgoILIT1GQXgQRBGnSpRfpvfdOFAgEJKQn5/fHmeCyqbt729x5PR+PfWSzKbv3PTP3zvvOmXOApSJisodsl/eBY4CDGvxtT4+IfU1fqvRzz7R7EgY38ce4D9gaWIE8o9FqFdwULwM7RcTd7pXqjl5Ve8DFEKSdipP2RhtInsP+sZTSdg14rK8BnwG+Dowt8WY7zZLQrRfqEeRxuI00nlmsdiqpEs89W5Hvi2pmSXgY+Ap5QoU7K1oSriRPT21JkEWhiyfQjwF70dj7FTpaFvhHSummlNIGdX6sqViOfUPg6RJursnA6R6q3fILoF+Dv+e5EfGK0UuVLQmfL05Qmzn+/yny1Yy7gP0reK4zGfgReaXld9wr1aPzyIo/of0SOLTZPwZwCXBYRDxV58c7L/mmrs+VaDNdGhE7eKh2eVuvTp6etNEvkGtHxINuAamSzzv7kBce7dPkH2UsMKCim+HfwM4Rcat7pGqhV8Uf/0+Ay1ugrH2BPDvSqSmlJev2jSLeL77XH0q0jU72MO2Wo5pwfN9hSZAqWRB6pZSOIt8H0KcFfqSqloQbgbUsCbIo1O7EeSqwM/nyZLP1AfYFnkkpnZ5SWqZOj3lKRHybxt/g2h0vkOfeVtdetD9Gc25gP8H0pco93wwALgB+aBpNMxU4AvhkRLxhHLIo1PbEeSywHa0zfr8vsDfwVErp7HpNMxkRx9L8YVezc0pR5tQ1P2rC9xwN/N3opUqVhEWBm4EvmkbTjAK2jYifRsQU45BFoT4nzaOATwGttAhJH/KCbU+klC4p3iWu9eP+NXm61lZ1s3tnl1+41wK2acK3vjYiJroFpMo812wM3AusZxpNcxt5qNG1RiGLQv3LwkvAtsA7LbiNtgduTindn1LaPaU0Vw0f94+Bs1t0s/R3z+yyHzTp+15h9FJlSsK+5PHww02jKSYBhwEfL6ZBl+p3fmwE0z0BrkEeFz+0hX/MfwOnktcXeLUGj7k/cAewZos9zh0i4lL3yjnejsPJi+v0bfC3ngwMcxo+qe2fY/oBvwO+ZhpN8wzwlYi4zyjUCF5R6NycIh4GtqC1hiF1Nhz4KfBiSumylNK2KaVePXjM48hjTFvtRG9t98gu2bsJJQHgHkuC1PYlYTnyG0qWhOY5mTwFtSVBFoUml4UngY8Brb5wVG/ymghXAs+nlA5NKS3czcf8Ao1fxXd2NnRvnOMX8d7Afk369ve7BaS2fn7ZCbgPWMs0muIt4HMR8bWI+MA4ZFFojbLwXFEWXijJj7wk+cbkV1JKF6SUtkopRRcf83nkcaet4mMppQXdG+fIFsASTfreDxm/1JYFYb6U0qnA+cB8JtIUVwGrRcTlRiGLQs+f1CKldEPK3k0pPVIsYvaFYmxlV8vCS8DGwN0liqEv8GXgevJVhl+mlFbtwr//IXm16FYwF7Cjh+kcaWZOFgWp/UrClsAj5PV91Hjvk4d5fda1EdRMbXUzc3F59PyZ/PG75Nl9jo2IV7r4//YHzgR2KnE8jwLnAecVBWhWj/dK8gxQreAJ8rsprqcw8+3Vl3xPzUJN+hEGFPe5SCr/88k8wNHAgTjhSbP8E9ivq+cqkkVh9k9wjwCrzeavTQLOBX4VEc924f8O8rvtR9AaS9R3OybgzqJQ/T0i/j2Dx/pFWmvxrF2LYVGa8b65FfkKUjO8HxEOSZDa47lke+A4YIRpNMV7wEHA6RGRjEMWhdo+wa1B14ZAjAeOBH4TEZO68H02J78zv0gbxDYVuAu4BLgkIp4vHuP
c5JV2W2Udg+fIVxXGe8jOcJ/8NXBIk779ixGxtFtBKvVzyNLAH4DPmEbTXAPsX4spz6Vaaqd7FLq6Gm2/oijcl1Ka4/UDIuJW8swPl7bJ9t8YOAZ4LqX0cErpcGBFWuu+jGWBQz1cZ2qrJn7vUcYvlbYgDEwp/Rx4zJLQNO8Ce0fEpy0JakXtdEXhr8Au3fzn44HvRMTJXfyeu5LfhWnHmXmmkKdfbRUTgfUi4hEP24/sg4PJU+c1a1vdExEbuCWkUj1v9CVPp/xTYJiJNM1lwDci4nWjUKtqpysKy/fg3/YDTkopnV0Mu5mzlhXxV2AV4BxaZ6agWundYj/PXMD5KaUBHrYfsU6Tt9VcbgKpNAWhV0ppR/IVhBMsCU3zMvD5iNjekiCLQuPUYsaX3YHrujJ3f0S8ERF7kIfw3OsuVVcrFS9u+p9mr149t5tAavmC0CeltHtREP5Gz95YU/dNAn4DrBIR/zAOWRQaq1bvNG8G3JpS6tLNyhFxF3kl4Z2AB9216uarKaVDjKFlisJCbgKpZQvCAimlA4CnydODr2QqTXM7sHZE/NDVlVUm7XSPwuvUdiaiF4DtI+LRbv48nwJ+QF4x17moa2sqsHNEXOiJQHoIWKPJP8a8ETHG3VJqieeEpYHPA9uR3/jqYypNNYo8tfqZTnkqi0JznxyfJc+OU0vjyTd7/b4rU6h2+rlWAPYB9sDxoLU0DtiyuJJT5ZOCt2j+u/qrd7dQS+rxc0BfYFPyIpmfwasGrWIqcDrwo4h42zhkUWj+k+VN5Hfv6+EJ4HsRcW0Pn8y3J692uYW7Xk28CWwVEY9V9AShHzC2BY7jvSLiLHdHqWHH/rCiGGwLfAIYZCot5TbyTIoPGIXKrp3uUXi5jv/3ysA1KaWbU0qbdKuRRUyKiAsj4uPkmZJOAP7rLtgjQ4GbUkprV/TxD2uRsr+Ju6JU12LQK6W0Xkrp8JTSvcBI4AzgS5aElvIKsDPwMUuC2kU7XVH4AXB0g77dlcBPIuLBHv7MA4HdyFcZVnN37LZ3gW0j4s6KnTwsT75Jsdmej4hl3Q2lmh7fw4CtyVcMPk1+Y0StaSx5NqPfRMQ445BFoTWfVDcHbmnktwQuAY6pxTj5lNJmRWH4As5N3x1jyDef31ChE4mVgcdb5MdZMyIedjeUun08DwA271AOVsOJMFp+swHnAz90VWVZFMrxJPsezZnh4Tbgt8AVETG1h49jYeBrxYc3P3fNJGD/qoyXTymtBrTKStVHRcSP3AWlOT5+e5GnN/5E8bExrktSJrcXBeEOo5BFoTxPvLeQ35FplqeAY4FzImJCDx/L3OSxjt8G1nJX7ZIjgJ+1+1R0KaUlgJda5Md5AxgRERPd/aSZHrNLFaVga2BLYEFTKZ3HgEMj4nKjkEWhfE/C3ytO1JvtTeBk4MSI+HcNHtfmwLfIsyb1dredI5cCe0fE6DY+6ehPHhvbKvaLiNPc9aQPj9HBwMf531WDZUyltF4GfkZ+I3CqcciiUM4n5WWA51roR5oIXAQcFxF31+DxLQl8A9gXmN/dd46e2Hdp55ucU0rvAfO1yI8zElguIsa666mixWAgeU2DLYqPdfHNnbIbBfwK+FNPRwpIFoXWeKK+nzzus9XcA/wB+HsNhiUNBPYEvovvUM3OJOBrEXFGm56YPAys3kI/0s8j4nB3O1WkGMxDnh54WjFYD1dCbhfvA8eRJyxxKnNZFNroifvrwJ9a+Ed8izz/9ckR8WIPH2tv8ixJBxcvUJpJVMCPI+JXbbi/nwHs1WLF7EDg9Ha/R0SVLAYDyDcdTysG6wN9TaatjCWvc/SbiBhlHLIotN8T+XzkIRDztPiPOhW4BjgRuKoGsyVtURSGT+OUejNzbEQc1Gb7+4HFi1qruRP4ulOmquTH12DyFYPNyEOK1sXpq9vVeOAk8gxubxiH1KZFoXhyPx3Yu0Q/8svkm5/P6OkTVEppVeAgYBdf0Gbo6Ig4pI329bWB+1v0x5tclJifeuleJTmeFiXPnLdpUQ5WAXqZTFubAJwG/CoiRhqHVI2isDLwaAmf4CcCF5NnS7q1Bi943wH2p3Vudm0VR0bET9pkXw/gRWCJFv4xRwLfj4jz3fXUYsfPih1KwWbAUqZSGROBs4BfRsQrxiFVqCgULwB/A3Ys8UN4nPwux196Mk4ypTQIOIC8HsMi7vIfOjQift0m+/r/I9/Y3upuAL4REU+7+6kJx8kg8j0FGwIbFL+6jkH1fACcCvw/V1OWql0UVgMeovyXjScClwNnAtdExJRu5tEP2A/4IbCouz6JvIrzaW2wr28E3FGi/fm3wK8jYoy7oep0TPQGVi7KwLSPFXEYUZW9DRwP/DEi3jYOqeJFoXixuBD4Uhs9pJHkoUkXAbd25wboojDsWxSGxSq+/08BvhwRl7TBvn4X+V3SsngT+AVwSkRM8qlYPdz/hxb7/wbARuRZ4OY1GQGvAf8PONU3JySLQucXj2XJQ3ja8abeZ8kzJp0WEe93I5u5i8JwSMULw3jg0xFxc8n39c+TV6Mum+eAHwMXOJ2q5nBfn588+1DHjyVMRp08BRxDHr470Tgki8LMXlSOIc8C1K7eBI4gvzM7sRv5zA3sUxSGxSt6HLwHbBERD5V4Pw/gEWDVkj6E+4AfRsSNPi2rw349kLyAZsdSsCxOAa2Zu4l8BaHH045LqkZRGAQ8Awxt84f6DPB/EXFdN3Oamzyl7CHAiAoeC/8BNo2I50u8r28JXF/y4/oa4BDXX6hkKRhAXmW8YylYEehtOpqNScAF5BuUHzAOyaLQ1Reg/cnrFFTBBeSZZUZ1M6u5isLwowoWhueLsvCfEu/rfwb2KPl2mAr8nTyN7aM+Tbflc/KiRSlYA1ir+Hw5S4G66F3gFPINyq8Zh2RR6O6LUi/gX+SZL6pgJLBnRFzfg8zmBr4BHAYsUKFj4iHgY2VdICylNAR4kvaY9jEBlwFH+C5haZ97+wIrFYVg2seawEKmox54mryY41nduUdPkkVhRi9YKwMPAHNX5CFPJU9D+ZOe3MiVUhpMvrrwTaBfRbK7FvhsREwu6b7+KeAK2ufd2QRcVRSGu33abtn9bhHylKSrdigFq+AK8aqNKeSpwv8EXO/kB5JFoR4vZD8hT8lYJfcBu0bEsz3MbgRwJPAVqjEX+SkRcUCJ9/VvA79vw+3yT/KQpNt8+m7avjWCfO/AKkUxmPYxv+moDt4iLz56ckS8bBySRaGeL3B9ixPn1Su2nd8Hdo+Iy2qQ4VrFCejmFcjtkIg4usT7+ynkRfba0V3AscAl3V2EULPcd4I85ejKRSFYkXylYEVgPhNSg47xP5GnTp5gHJJFoVEvgOsDVRy+MBX4GfDLWlyyTSntSJ6jekSbZ7ZLRFxQ0n29N3AWsFsbb6Pni+J6ZkR84FN6l/eRhcjTjS5XfEz7fCVgHhNSg70DnENeH+gx45AsCs16cXyY6l1VmOYCYK+IGFuDHPsDPyg+BrRpXuOBrSLijpLu673IM37t2+b79dvAScDxZZ61qs5lYFlg+Q6fLwsMNi
E1excFbgROBy726oFkUWiFF86fAj+v8HZ/CNi+VuM9U0qLA78DvtimeY0CNizrGgvFMJLjyDekt7uJ5JmSTgVuqMKCS8WUxouTr+6NAJaxDKgEXidf8TwjIl4wDsmi0EovrMuTp1ersneAz0fE7TXM9VPkd6/bcTjSi8BGEfFGiff7XclzjldlSMm/gbOBkyLipZJus17AksDSwCLA8OLzpTt8rSqzkan83gfOI08Wcb9xSBaFVn4BfoI8DrfKxgBfjohrapjrYPJNpl9tw/3rNuATZb40nlJal7yY2RIV2s8nkadVPAO4plVufi7uIRlCXjV+YWBY8euIohwsUXw+yJcqlfnlFrgD+DP5xuT3jESyKJThhOkw8nSfVTcZOCAizqhxvluT371eqs3y+itwOHlBs4FF2XoTeKksw1xSSguSh+bsUMH9fWRxwnJWRDxTh2z7dDj5X6T4fBj5SsCQDmVgaPH7Xkjt6SXyjclnR8RzxiFZFMpWFJYAXvCF+kM/j4jDa5zxfMAfgT0qkN/bwA3A+cBlZSgNKaUvFNtnkYru8w+Sb+6/YNoY6eIG/UHkKUAHFR/zd/h8vhl8PrjD50Oq/LyqyhsJXAj8DbjLRdEki0LZy8JNwBbuBh86Hvh2rU9yU0o7kWekqcqCTM8CP42I80twDAwCjgL2r3hpHk2+d8NVhKWuGQVcRH6T5NYqTCAgWRSqUxT2Jk/Jpv85B9g7IibXOOsR5BtLP1ahLK8G9ouI10twLGxAvrdkEw8BSbPxDnl2sQuA62v9eiHJotAqJ0cDgdfwZsHOLgN2qvVNu8XNm78FvlOhLN8kL9p2Y0mOiS+SrzAs62EgqYM3gEvJVw9ushxIFoWqlIU/UI355bvqWmCHiBhXh8y/DvwB6FORLCcDu0XE30pyTMwFHEheyXt+DwWpsl4DLi4+bm+VGcMkWRQaeVK0IvCEWczQ9eS1FsbWIfdtyDe8VeVqzpSiLJxfomNjGHAMsJvHh1SNl0TgPuAK4ErgAW9IliwKPjOmdD2wlUnM0E3AdhHxQR1yX4M8U9CCFclyIvCpiLipZMfHZsAJwGoeDlLbGQNcVxSDKyPiP0YiyaLw0ROhHciXVjVjx0bEQXXKfm3gRqpzZWE0sGZEvFKyY6QP8EPycKS+HhJSqb1YFIMrgJvLvIikJItCo06CngKWMY0ZmgSsHhFP1Sn/bYoXrKrcs3Ab8PEyjvdNKa1Dnr1qZQ8LqTSmAHcWz7NXRMTjRiJpTrjYGFDM3nC0ScxUX+CklFK99pdngHcrlOdmwE9KeqzcD6wLHEcezyypNb0CnAHsCgyLiM0i4mhLgqQuve4bQVbM9PIcsLhpzNS3I+IPNc69F3AXsF7FspwCbBgR95X4mNkaOA9YyENDarp3yPeUXQ/cEBHPGokki0JtT3y+SZ62UzM2ljy+/tkaZr4jefajKroT2KTMs4qklJYALgHW8vCQGmoccDt5QojrgQddFVmSRaG+Jz39gReAhU1jpv4FbF6rF6SU0vnAThXOc4+IOKfkx80A4HxgOw8PqW4mAw8UpeAG4I6IGG8skiwKjT3pOYg8d7xm7qCIOLZGeY8Ehlc4y1eBZSNiYsmPmz7AScA+Hh5STfyXfNXxDvIbNHdHxBhjkWRRaO4JT3/gSWAJ05ipCcCmtRhfn1KaDPSueJ57R8SZbXDsBPBnYHcPEanLXioKwR3kIUWPuxKyJItCa57w7Ey+SVMz9yKwTkSM7mHW71KdNRRm5klg1XYYX1xcWbgS+KSHiDRTE4CHyVcM/gX8KyJGGoski0I5TnaCPNf9JqYxS5cDO/TkXa+U0l3ABkbJaOBN4C3ycKR7gfuAB+qxKnadj59FgMeAwW5WicnF8XBfh49HImKS0UiyKJS3LKwO3E91FgHrrlMi4oAe5Pwb4GBjnKkp5CsO/wJuBW6JiNdLcPx8B/idm08VM5W8eGfHUvBQRIwzGkkWhfYrC78Fvm8Ss/WriDismxmvB9xjhF3yAvmK1y3AbRHxXAseOwsBrwNzubnUpt4CHiFfLXis+PwJbzhvce/eAAAgAElEQVSWZFGoTlGYB3gIWNY0ZutM4MDuTNeXUroD2MgIu23ktNJAvurwRCuszZBSehJY0c2jkhsNPA08XhSCR4FHI+JNo5FkUbAsbECegcIhSLN3P7BPRDzcxYy3Aa4xvpr5D3DdtI+I+E8Tjpvexc/hqs0qg0nAK0UheLL49RngSQuBJIuCZnfS82PgCJOYI1OBu4B/Av+IiAfnMOMLgC8bX+13X/K7oNOKw60RMbYBx8zewOnGrxYxtigCI4HXyLO2vUiekvQl4DWnIpUki0J3T3p6AZcBnzWNLrsUOHh24+hTSguTpwscamR1NYF8hew64KqIeLQOx8ue5MXX+hm3muw84Bs9ncZZkiwKmt3JzyDyTbfLm0aXjQV+DBw3q7UCiiFIVwG9jKxhXiave3Al+erD2sBy5NWyFwX6F3+v/yxO/N8t/i1AX2BxY1WL+FNEfMMYJMmi0IiysBz5ptHhptEtVwBfiYj/ziLjnwC/MCpJNfDjiPilMUhS9/jObVdaVcSzwFbAG6bRLZ8F7kopLTmLjI8ATjQqSTXwhBFIUg/OfY2g61JKK5HfHV/aNLrlNeATEfHUTPLtTV6d2Cs3krprKjA0It42CknqHq8odKddRTwJrEke162uWwy4M6W08Uz+fJglQVIPPWpJkCSLQrPKwvvAzoAvRN0zP3BVSmnTGfzZZsYjqYduNQJJsig0syyMAX5nEt02CLi6WNSuoy2MRlIPXWoEktTDc10j6JmUUn/yCp6LmUa3/RfYMiLuLzJ9hjxFpyR1x0vA0hGRjEKSus8rCj1tWhHjgN+YRI/MB1yeUlo2pTTckiCphy60JEiSRaFVnAQ8Zww9Mhy4CfiqUUjqoTONQJJ6zqFHNZJS2h042yR6HqX7paQeuC8i1jMGSeo5ryjUzl9wlg3Lq6RmO9YIJMmTspaTUlofuNMCJrW9CcCzwCjgreJjFDC2098bSL4HZ0NgfWOru5HAUhEx0Sgkqef6GEENW1fEPSmls4C9TUNqG2OA24C7gMeBx4DnI2JyF95EuMcYG+JPlgRJquG5rRHUVkppKHm61EGmIZXWFOAfwBnAtRExqQfPCZsDtxhp3Y0DRkTEKKOQpNpwiEytm1fEm8DhJiGV1oXAKhHxhYi4oiclofB9I22I0ywJklTj81ojqL2UUh/gIWAV05BKYxLwfxFxSg2fC1YAnsA3ZeptArBMRLxuFJJUO7541aN95bHLXwOmmoZUGgfWsiQUvuvzbEOcbkmQpDqc0xpB/aSUTgAONAmp5V0UEV+q8fE/BHgZ6G+8dTURWDYiXjUKSaot3+mqrx8BvnhJre+oOvyfB1oSGuIMS4Ik1YdXFOospfQZ4HKzllrWyIhYtMbHfT/y1YShxltX44HlLQqSVB9eUah3E4u4EviDSUgt66Y6/J97WxIa4jhLgiTV8TzWCOovpTQ3cDewhmlILWf/iDi1hsd7L+ApYDmjrat3yDMdvWsUk
lQfXlFoRBuLmADsSZ7CT1JrubfG/99nLAkNcZwlQZLqfA5rBI2TUvomDkOSWslUYGBEjKvhcX4TsIXR1tWb5JmO3jcKSaofryg0spVF/JG86quk1vByjUvCupaEhjjSkiBJFoV2tC/wjDFILeHpGv9/3zPSunsCONEYJMmi0HYi4r/Al4FxpiE13Su1+o9SSqsUx7bq67sRMdkYJMmi0K5l4ZHihGKKaUjlLwoppWHAlUAfI62riyLin8YgSRaFdi8LVwKHmoTUVD2egz+lNAC4AljCOOtqPHCwMUiSRaEqjgH+YgxS07xeg//jeGBdo6y7P0bEi8YgSY3j9KhNVizG9k9gc9OQGm6FiHimB8fvkcBhxlh3bwDLF/d4SZIaxCsKzW5qeTG2zwH3m4bU2J4OvNaDknCwJaFhDrUkSFITzlONoEXOWFJaCLgZWMU0pIZ4NSJGdPN43R84yefQhrgfWD8iphqFJDWWVxRapbFFjAI+DjxpGlJDPNTNknAUcLIloSGmAgdYEiTJomBZiHgL2Ia8oJCk+nqsGyXhCOCHRtcwF0SEwzIlyaKgoiy8CmwK3G4aUl3d2IWC0DeldBrwY2NrmPdxOlRJsihourIwGtgK+KtpSHUxCrhpDkvCUOBWYB9ja6hDI+I1Y5Aki4KmLwsTgT2AY8mzs0iqncsjYrYro6eUNgbuADY0soZ6iHyzuCSpmeejRtD6UkpfAs4A5jUNqccmAavOav2ElFI/8jCjQ4DeRtZQU4GNI+Juo5Ck5vKKQhnaXMTfgfXxJmepFo6YTUnYjnyj82GWhKY42ZIgSS1yDmoE5ZFSGggcCXwD6GMiUpedBuwfEanTsTU3eeHDHwDrGlPTvAGsGBHvGoUkWRTUvcKwBnACsIlpSHPs/qJkv1X8fh5gNWBr4PPAAkbUdF+JCCdxkCSLgnpYFgLYE/gFsLiJSCq56yPiE8YgSRYFdaUUQMRMZj5KKc0FfBX4HrCCaUkqoQnA6rO6d0SS1HiOc2/dcjAXeXrU7YChCT4AxpNnbJlYvLBOJE+jOgG47GFY9FFY5WVYfiwMmFD8xYkd/sGE4j+Y9meTO/06qcOvk4qvTyl+nfYxddpHao1ZW3tFEOQ783sXO3Xf4vd9io+5iq91/rXj53MXn0/7teNHv07/dkZ/Z+4O32sgMBa4BTgGeC85w600C0dZEiSp9XhFoTVLwkDgL+Rx0434fkwpfp3a4fMpnYpCx8LQuTxMLL42qdPXpxWOjn82qcNH6nBiP+2jT4ePvrP4vG+HYtCnw+e9O/0fvYsdvXdRHjp+NMIj5LtkX7IsSDPyFLBWRIw3CklqLV5RaE0/6lAS7gLuA/qT39ie2Xn0DM+rX4VlAvpOO3nuNYuPaX9nrjZtkNOKUCpKytROHzMrSB1Lz5RZfD6pQ2l6D1iavLz26rhylDQTU4A9LQmS1Jq8otB6J7ODgSeBYcD15BuWx/G/kS3T3iSf9kZ56vCC2/HcdeJGsPJ9cHvH5Wd7F/9R/+LXufnoO/PTPu/467ThNp3f+e/bqZ10HMYzoxbT8WvTvl/M4KR80kxO0Cd1+uj89yfy0SsWM7qi0fnKxrRhWeOKj0kdSkXHx9+P/w03mla4Oh5A04rHtO83rvj678g3kEwmT1F1t1cVpI6OjYiDjEGSLAqWgHx+OYhcBhYChgLDi1KwUPGxFLBx8U9Gke9LGNDpfD46bbupfHQU0ARg/Jsw4FkYPhp4ExgJvAI8B7wIvAZMqviJa+8IhhehLw0sCSwCLEyeK3MQeTnsafcozNWhQMT02/cjbW1C8fWhxa9PAk8D7xQb9k3gdeA/5Mnj3wBG0zr3fkh19iL5BuYxRiFJFoWqlYLewAhgVWBtYGVg2eIcdBB5DvemGVucsL5MHiD8CHmS+UeA/7bpiercEaxCXk1rTWDFoiAMId8U0kzjgTHkCf5fLgrFA8U2eQqYbHlQ2z1F8smIuN4oJMmiUJVXvqWA9YGPARsWxWDesvz8U8nvbj9Cnq3nRvLNEVNKfJK6SgRbAlsA6wCLkq8GlMUE8lWgB4Fbi49HLA0qv5Mi4uvGIEkWhXYvBysD2xYf6wDztctjmww8S75R4qLiJLUMw2LWjuBLwDbAKuRhQ+1iAvkKw3XAxcAdlgaVz8jc4eNdo5Aki0I7loMhRTHYBdiMfA9BW5sKPEyes/Vs4K0WO0HtF8Eu5Du/NyLfeNzuJpOv/lwAnI/Tr6o0to2Iq41BkiwK7VYQVgT2AXYGFqtqDiOBM4HfAqObfHLaJ4JvAAdS7WWp3wWuBk4EbrUwqHWdHxG7GIMkWRTaqSCsDnwT2IkS3XNQb8+TVx0+lebcx/ClCA4h35ysbDJ5WNLvgWstDGotbwCrRsQoo5Aki0I7FIQlgR8Au9P8iXFa1j3kd/Tva9CJ6aIRnAx81uhnagr5CsORwF0WBrWGz0bElcYgSRaFsheEfsW57w/IaxxoNkYBOwI31vmkdNkIriCPAdPsjQXOAH4KvGNhUPOcGxG7GYMkWRTKXhLWB44FNjWNrhlJXimuXjfVzh3B7cB6Rt1lzwKHARdYFtR4/yYPOXrHKCSpXHoZwYcFIRJ8B7jBktA9i5BvcK6XQy0J3bYceWakkyKYK3x/QA11gCVBksrJM4ZcEgYBJwBfMY2emUheae6BWbxzvUwEawLDi6b6FvAo8Ngs/s2gCJ4syoh65g7yTTfPe3VB9XdaROxnDJJUTn0sCSwJnINXEWpiLvLiEjOycwT7AQ8x/dRR44F7IzgLOAWY1OkkdgdLQs1sTL5stlkEt1kWVD+vAwcbgySVV6WHHiVYCbjKklBbH+v0+5Ui+GcE5wNbFSUhAe8XH1PId4+vR76scw+wZafhMVsba00tCVwKfNZhSKqPqcC+rr4sSRaFMpeEy8m/qoaWAhYoTkB3iOBm4JPFnz1LnrJzc2C14mMT4EfkVYYB1gKuAL7T4STWjVR7CwLnAdtZFlR7B0fENcYgSeVWyTOEBCOAa3GWzbqYQL55dgPgz8CA4mu/A44APpjJcJfeEXwH+Akwf/G1HwNHpsQrEYww2roYDWwP3OIwJNXG8RHxTWOQJItCGUvCfMCVONyorkXhcPJVgvmAt4H9gIvn8ER04wjOJV+ZmAr8AtgXWMxo6+ZV4OPAc5YF9cw/gC9ExBSjkCSLQhmLwqnFeafqZErxMRf53eov0vWF2FaJ4GrypZ8pebt5532d3UkeEjbJsqDuuRz4ckRMMApJag+VukchwR6WhPrrXZSEyeTlrbuzWvPjKfEV8s3OvS0JDbERcIwxqHvOAnawJEhSe6nMFYUESwB3A8Pc7I1xLPD9Hr47/e0IjjPKhpkEbAtc51UFzbk/A/s43EiSLAplLgrnALu5yRvjYfJ0pxNrcMJ5bQTbGGnDPFhsu8mWBc32aZWfAr+MCHcWSWpDlRh6lPLQ653d3I0xGTikRiUB8opNHxhrw6xFHjImzcJ7wLYRcaQlQZIsCmUuCb2K81aHuTfINcDVNXw3+pGUOMtYG+o7wADXV9CMvQB83HUSJMmi0A42
BD7hpm6MycDRdfh/jyG/hanGWBrYyxg0vVOAVSPiQaOQJItCO9gfryY0zPXAbXUY2/5SSlxgvA21L9DLqwrKxgEHRMQBETHOOCSpGtr6LCDBcOAxYAE3df1NIi/adXudboJdIoKHgMFG3RBTgK2Am72puepuA/aLiKeNQpKqpd2vKHzKktA4D9WxJAC8nBK3GHPD9AZ2NIYqGwXsGBGbWxIkyaLQjj7rJm6cixvwPRx+1FhbAb0dflQ1CfgLsEZEXGgckmRRaMdXusHABm7ixhjToJP4fwCvGXfDLA2sYQxVchuwfkTsHhEjjUOSLArtamVgETdx484unm/AWPYxKXGVcTdMX2ATY6iCJ4AvFMOM7jMOSVK7F4W1qNDK0832lwZ/r8lG3jDrGUE7e4Z8K8qqEXGJcUiSqlIUVnXzNsarQCPPMG5NiYeNvWGWN4J29BzwNWD1iLjQ1ZUlSTPSVusLJOgHDAMWBtZx8zbGxcDYBk+h+Vc3cMMsDXw6gheBl4DxTpdaZveR1y+8KCKmGIckaVZKPzQnQX/gM8DngHWBxYB53bSNMQHYCHigwSePwyN4HOe+baRxwJvA48DVRVl729JQlk33N+DEiLjHOCRJlSgKKReDE4uCoCa4Cfh4k04W/xLBbm6CpnkFOBj4m2WhVT0M/Ak4LyLeNw5JUmWKQsrDi+4Eliy+9ABwc/HiOBE4FRjoJq6vPYE/N+lEcYsIbiAvDKa6HmscCrwBrAZsXDTz3sBY8mrcd1sWWsX7wKXAOcD13nsgSeqJMt+j8ImiJEwEvgGcHflzEiwFzOXmra+XyOMZ/tyk739zStwd4WIZDXg34Sngkg5l4PMRnEke+rWzETXbBPJosPOAyyNinJFIkqpeFKa9kTwVuHtaSSgsQJvdqN2KziPf2No7guXJ06hMasA7yytG8Bp5TYUzcVW9Ruh8L8gN/O+A62s8zTAGuAa4DLgiIt41EklSrZV5etSbgdHkmY5+n2BAhz+bm/ae+rUlzlJOBNaM4D7gIeBBYKOo32i2ERHcGsHDwKPADhGcgSs1N0K/Tr8/iTz2bypwhfE00qXkyRuGRMSXI+IvlgRJkkWhk8gjX35V/HZL4PwEixe/dz2uOrsaeCUl/kJe2W5u8sIVv67j9zwE2Lz4XksBpwPzA+e6OepuavHrwAhO73AT+QXANd6f0ChvALtFxFURMd44JEkWhVn7fXG+CLAdcGeCnwPrk+/BVB1MAY4FDoiYblW7hev4fZfo9PsFgJ8CxwEfuFnqakXgsAgeBPYpvnYncIDRNNKREeGuLklqmHZYR6EPuRx8n/xms+rsNvJMN08By3b6s/OAXer0DvNPI/hFp6/9F1gB+C3wFTdNw1wE7AuM9mpCo7wErBARE41CkmRR6Hph2Aj4P/JsSEPctPUxhXzpZinghE5/NhH4GHBnnU4eh0XwCHnp7Y7+AJxMXnK2v5uobsYUGZ8MnGdBaLSvR8RJxiBJaqS2ueE34M7IbyqvAexVnNOqxl4AniFfvunsijqWBIA3UuLUGXx9z6Lx3uvmqZujgTWBLVKyJDTeXcApxiBJasL5dftJsCjwNDCPm7i2JgPvAgt1+vp4YDPg3jqfRC4YwaPAIp2+/jZ5dT3HntXHNsC1FoRmmAKsHxEPGIUkqdHadQrRyeRFY1VjfWZQEgD+0YCSAPB2SjN8a3VBS0I9i7cHU/OcbEmQJFkUamsM8L6bt/bta0bGU99pUTs7jpmvnTAFp7uqtQnAW8bQDK8AhxmDJMmiUFtjgVFu3tq5APg8MG4Gf3Yp8GADh6WMTomTZ/Jn3wJ+6eaqeet+2xia0ct3cTE1SZJFocYiv6n8hpu3RifmwDfJ05D2m0Ej+3UTfqbjgFdn8PV1gR+nxJNutpp5F3jHGBrtVxFxhzFIkiwK9fGqm7c2flOUhe8z/d3vlwAPN+Em1/+mxJ9m8PVdgJUiOBinvaqVN4Ap3sjcSP/CC2OSJItCXT3n5u25e4Bfp8SPyFNJdfQBzbmaMM0fgZc7fa0fefW9K1Lib26+mnjZCBr9vPU5F1aTJFkU6usZN2/PjCUPORoUwddm8Od/Bx5r4jvNY1KabtE3yPdSrB3BN5n5Tc/yQGrRQ27HiHCklyTJolBnz5En5FE3/Qa4OyW+Bwzv9GcfkBfharYTgBc7fW1u4BDyVKrfwyFIPfWYETTCJGCniHjQKCRJFoX6ew0Y6SbunpuAXxSff3oGf/434IkWGLf+QUocP4Ovbw4MjOCClDjNzdlt44HHjaERJeHLEXGFUUiSLAoNEPlNbye/6YbXgb2BqTMpAmOAo1ro5z2RGd+QMu2n/xZwn5u12237WWOop6nAARFxmVFIkiwKjXWPm7hrJgH/B7zYoSScTF50C2Ai8BPgmRaaBWdsSnyXPDPTNGeQrzYATEiJPXFhje54ApjsjEf1Mh7YPiLONApJUivq0+aP7043cdccAVzS6cTw9JTYNIKNgbuAW1vwxPHylFg5gm3I74Bf0elnfDwlvhzBX4G+buY5dgfwOWOoV0nYKSIuNwpJUquKdn5wCYaQh1gPcVPP3jnA7m3+7vGPIzjSTT1HJgObAXd6RaHWXgW2i4iHjUKS1MraeuhRwFs4PH2OXE++L6HdHZkSJ7u558grHjz18BiwuSVBkmRRaA3Xupln7QFgR2BSRd45/jpwsZt9tu6o0D7RIH8A1ouIl4xCkmRRaA3X4XoKM/UEsD3wToVOCKemxC42yNlyrs6aGUOe2ejbEeFzkSTJotBCnia/aa5OniWvYvxKBd81npASXyCvF6HpvQFcbQy1cA6wXEScYhSSJItCi4m8MO9Fburp29NngGcrPLTkg5TYDrjB3WE6NwLvOeyoJ14h37C8R0T8xzgkSRaF1nUJ8L6bO3sE2JbWWguhWcakxOeAq9wtPpSAc42huyYAvwfWdKVlSZJFoQQCXgT+6eaG24FPAM9bEj70QVEW/moUADyFw466WRCOBhaPiO9GxGgjkSRZFMrj9Kpv7IuBTwJvWBKmMzkldk2J3xoF5wJT3EfmuGcCfwJWj4hDIuItI5EktYuoygNNMBd5xsd1qraRpwK/Aw4mz/ijWftmBEcB81TwsY8CVrFMzqlzgG9FxLtGIUlqR5W5ohAwETi+aht4NPA14PspWRLm0B+LGZFerOBjP9+S0BUPWBIkSW1+/lwdCQYA9wIrV+HxPgzsC9zriV+3LBbBacCnKvJ4/wusAbzo/jKnNoiIe4xBktSuqnSPAgFjgZ/mztDWhYjTgU0sCT3yWkpsCxxO3nHa3fGWhK64w5IgSbIotF9ZuAg4tl0f3yvAHsA+KTHGk74em5oSh6fEZ4AH2/hxXg38xM3dFd73Lkmqwnlz9SToC5wF7No2J7TA34CDgNctCHUxdwS/BA4kj2FrF/cCW+MCa11wcUR80RgkSRaF9i0LA4AzgR3L/lgeB34G/N0TvYbYNIIjgS3a4LHcB2wH/Nt9Z069B6wWEa8ahSSp3fWq6gMv7lf4KnBGWR/D28AR5PleLQmNc3tKbJESX6PcMyP
dTF6h25LQJXtbEiRJFTpfrraUMziMfJNz3zL8zB8AFwC/BJ7zJK+pFojgYGA/YKES/dx/Ln7mie4/XXFBROxkDJIki0L1CsNngN8Dy7bqzzgWuAw4BnjAE7yWsmQE3wV2AxZs4Z/zbfIwtePdf7rqLWCliHjbKCRJFoVqloVh5CsL+5JXcm6Zk7tLydNXPugJXssXhgOBnYERLfazXQn8AHjcfag7DoyIE41BkmRRsDBsQh6O9KlmZvQMeSaj04CXPbkrlcERfJV8hWEtmnsz0H3kuTzPdx/qrlHAYhExwSgkSRYFTSsMWwJfAz4NDGzE93wbuAG4kDzMyDHk5bd1BLsA2wCLNeh7TgLuKErmueT1INRtTocqSbIoaKaFYQXgC8DngDWA/rX6vycDr5Lnsv8ncBUw0pO6tjQogm3IMw1tCCwJ9KtxOXiWvHjahcBd7ke1cm5E7GYMkiSLgmZVGHoBywMbFed6qwFLAAvMwTnf5ARjX4P+L0PfJ4CHioLwCDDBk7pK6R3BisD6wJrAquR7GoaQL131mc2/n0ie0H8k8CRwD3AbcD9ePaiD14BlImKiUUiSLArqSnlYABhKvhF6EDAf+UboXsX53AfA6DuAPeCkV2A5hxNpZuVhKDC8KAwLAPMUO1OQrz69W3z8G3gdeNt9qVGOiogfGYMkyaKg2paJlOYHbiTf1yqpnL4ZEccbgyTJoqBalYSBwHXkoUqSymsqsHFE3G0UkqQq6GUEdS0JA4DLLQlS2zxfHmsMkqSq8IpC/UrCUPIaV+uahtRWVo+IR41BktTuvKJQn5KwDHCnJUFqS06VKkmyKKhbJWEF8pppS5uG1JY+bQSSpCpw6FFtS8JmwD+A+U1Dat9DHVgwIkYbhSSpnXlFoXYl4ZPkhZUtCVJ7CxxWKEmyKGgOS8J+RUkYaBpSJaxiBJIki4JmVxIOB04GepuGVBlLGYEkqd31MYJuF4QAjgQONQ2pcpY0AkmSRUEzKgl9gTOBr5iGVEnDjECSZFFQ55IwL3AJsJVpSJW1oBFIkiwK6lgS5gcuAzY3DanSnN1MkmRR0IclYQRwDbCSaUiV19cIJEntzlmP5qwkbAPcaUmQ5HOnJMkXO5FSGpxSuoB8JWERE5Hkc6ckqSocejTzkrAFcBawhGlIsihIknyxsyD0TSn9HLjOkiBpJqYagSSp3XlF4aMlYS3gHGAV05A0C2ONQJLU7ryikAtCpJS+BfzLkiBpDgxOKW1sDJKkdhaWhLQYeZXlrd0dJHXBK8BKEeHVBUlSW6r0FYWU0q7Ao5YESd0wAjjIGCRJ7aqSVxRSSosApwOfcheQ1AOjgBERMc4oJEntpnJXFFJKOwAPWRIk1cBCwM7GIEmyKJS7IAxIKZ0AXAQMcdNLqpF9jECS1I4qMfQopbQ28FdgBTe5pBqbAgyLiLeNQpLUTtr6ikJKqXdK6XDgbkuCpDrpDXzSGCRJFoXylIShwOXAz3BhOUn15cxpkiSLQklKwueAx4FPu4klNcD6RiBJajdtdY9CSmkAcBKwu5tWUgNNBuaNiPFGIUlqF21zRSGltBxwqyVBUhP0wfugJEkWhZYsCd8GHgHWcZNKapIVjUCS1E5KfZNvSmlB4DRgezelpCZbwggkSRaF1igJawF/B5Z2M0pqkink6VEBRhiHJKmdlHLoUUppL+BflgRJTfY0kIrPFzYOSZJFoXkFYZ6U0rnAGUB/N5+kJlsU+EPx+RDjkCS1k9IMPUopDQcuA9Zzs0lqEYOAB4FbgMHGIUlqJ6W4olCUhDssCZJa0CeAnYDRRiFJaictveBasTbCt4CvAgPdXJJa0H+ARYDlIuIZ45AkWRTqWxDWBo4GtnYTSSqBtSLiIWOQJLWTlhp6lFIallI6HbjHkiCpRLY0AkmSRaE+BaFvSun7wDPA3vxvXnJJKoNNjUCS1G6aPutRSml14DxgZTeHpJJaxwgkSe2mqVcUUkp7ArdbEiSV3OIpJadHlSRZFGpQEPqmlM4AzgLmdTNIKrkA1jYGSZJFoWcloQ9wLrCX8UtqI2sYgSTJotAzJwJfNnpJbWZNI5AkWRS6KaW0K7CvsUtqQ8sbgSSpnTRswbWU0nzk6U+HGbukNjQ6IhYwBklSu2jkFYW9LAmS2tjglNIQY5AkWRS6blvjltTmljMCSZJFoeuWMm5JFgVJkiwKnU0ybkltblkjkCRZFLpugnFLanNLGIEkyaLQdX2NW1KbW9wIJEkWha6b17gltblFjUCS1C4aso5CSmkh4E0auG6DJDXBOGCeiEhGIUkqu0ZdUfiqJUFSBfQHFjIGSZJFYTZSSnOnlNvkOtoAABe9SURBVHYFfmHUkipiMSOQJFkUZl0SNgXeAs4lv8smSVXgDc2SJIvCLErCPMD5eAOzJIuCJEkWhQ7+D2f/kGRRkCTJotDJXkYrqaK8R0GSZFGYkZTSisAKRiupohYxAkmSRWHGNjRWSRU2zAgkSRaFGVvZWCVV2FAjkCRZFGZsAWOVVGELppT6GoMkyaIwvfmNVVKFBTDcGCRJFoXpTTVWSRW3qRFIkiwK03vPWCVV3MZGIEmyKExvtLFKqrg1jECSZFGY3urGKqniVjICSZJFoYOU0k7ANsYqqeIWTCk5TaokyaJQlIR5gd8bqSQBsIQRSJIsCtn3gIWNVJIsCpIkiwIAKaUFgO8apyR9aIQRSJIqXxSA3YFBxilJH1rMCCRJFgXYzygl6SMceiRJqnZRSCmtD6xilJL0EQ49kiRVuygAOxijJE1ncSOQJFW9KGxhjJI0nSEppT7GIEmqZFFIKfUC1jBGSZrh8+uCxiBJqmRRIK+b0N8YJWmGhhiBJKmqRWE+I5SkmRpqBJKkqhaFXkYoSTO1kBFIkqpaFF43QkmaKa8oSJKqWRQi4j1gpDFK0gx5RUGSVM2iULjaGCVphryZWZJU6aJwApCMUpKm49AjSVJ1i0JEPAhcY5SSNB3XUZAkVbcoFPYC/m2ckvQRg41AklTpohARbwC7AR8YqSR9aH4jkCRVuigUZeFGYCvgHWOVJMArCpIki8KHZeFu4J/GKkkAzJdS6m0MkqTKF4XCy8YqSQAEDj+SJFkUPvSCsUrShxx+JEmyKBQeNVZJ+pBXFCRJFoXCfTj7kSRNs4ARSJIsCkBETALuNlpJAryiIEmyKHyEMx9JUjavEUiSLAr/cyGQjFeSLAqSJIvChyLiBeBK45Uki4IkyaLQ2W+NV5IsCpIki8JHRMQtwNlGLMmiIEmSRaGz7+NKzZKqbaARSJIsCp1ExChgB+Ado5ZUUV5RkCRZFGZSFh4ENgae7MI/e99NI8miIElSGxeFoiw8DawO7AvcBUzp9FemAi8CJwCbA0OAW908kiwKkiQ1RzTjm6aU5geWKX47AXg+IsZ1+jvDgPuBRd1MkkrsqYhYyRgkSRaF2haKlYHbgcFuKkkl9XpELGYMkqSy6dXKP1xEPEG+GXqCm0pSSTn0SJJkUahTWbgF+CqQ3FySSmhgSimMQZJkUahPWTgfOMzNJamkz7MDjEGSZFGon6OAE9
1kkkrI4UeSJItCvUREiogDgUPcbJIsCpIkWRQ6F4ajgcPxngVJ5THQCCRJFoXGlIWfA58F3nUTSrIoSJJkUehYFq4CPgY86WaU1OL6G4EkyaLQ2LLwCLAWcBDwgptTUoty1iNJkkWhCWVhQkQcGxHLREQAewBT3bSSWsg8RiBJsig0vzicA+wPTHbzSmoRXlGQJFkUWqQsnA5sC4x2E0uyKEiSZFHoWBauA1YELnYzS2oyhx5JkiwKLVYW3gS+BOyFNztLap5VjUCSVLpz6ao80JRSb2An4NvAenPw2N8HbgTeAIYBixYv9v3cbSR10ThgkYhw7RdJkkWhxUvDcGBT8tSqw/nfsIAPgIeB+4C7I2JSp3/XB9gA2AHYE1jIXUjSHNozIs42BkmSRaH9y8a85KsTh+JiSpJm75yI2MMYJEkWheoUhhWAS4CVTEPSLLwBDI+IZBSSJItCdcrCEOA2YIUa/ZfvkReN6wUMMmGpbawSEU8YgySpDPoYQQ3aVsRbKaXtgPuBeWfz18cDrwKvFb++3OHzV4CXI2JMhxLSD1gEWB5YHVgX+DjeHyGV0RaARUGSVI5zXCOonZTSUsDiwFwdvjyWvPDbu8C7ETGuBt+nF7AGsCuwG7Cw6Uul8JeI2N0YJEkWBTWinPQBjgG+YxpSy3suIpYzBklSGfQygpI3vYjJwLUmIZXCsiklrwBKkiwKaphHjUAqjQ2MQJJkUVBDRMTr5JmSJFkUJEmyKOgjnjcCqRQ2NgJJkkVBjfSSEUilsF4xCYEkSRYFWRQkfWgAeU0USZIsCmqIkUYglYb3KUiSLApqmLeNQCoN71OQJFkU1DCjjUAqDa8oSJIsCmqYd4xAKo1lU0oLGoMkyaKgRnAdBak8AljLGCRJFgU1wlQjkEplPSOQJFkU1Ai9jUAqlXWMQJJkUZDbUlJn6xqBJMmTSzWCVxSkclkipTTEGCRJFgXVW18jkErHG5olSRYF1d1CRiCVjjc0S5IsCqq7oUYglc7aRiBJsiio3pYwAql0vKIgSbIoqO6WNwKpdBZPKS1sDJIki4LqaQUjkEppTSOQJFkUVBcppUHAaiYhldL6RiBJsiioXtbFdRSksnKKVEmSRUF183kjkErLG5olSS0pjKDcUkq9gNcBb4iUymuRiPi3MUiSWolXFMrvk5YEqfQ2NwJJkkVBtXa4EUilt70RSJJajUOPSiyl9GngKpOQSm80MCwiJhmFJKlVeEWhvCVhHuBPJiG1hcHAtsYgSbIoqBYmAGsDC3T4ONlYpNI6xAgkSa3EoUdtIqXUF3gNGGoaUmltGRE3tfDzTH9gOPkKyPzAQGDe4mO+4mvzdvoYXPzap/hvBnf4L0cDLwJ3AzcBtzj8SpIsCqr9C/j2wCUmIZXaE8A6ETG+Cc8h8wJLd/hYDBgGLFq8AbFIUQbq6R3gHOBXEfGmu4MkWRRUmxf5fwDbmYRUesdExA/q+FwxlLwa9JrA6h2KQStdjfwv8MOIOMndQZIsCurZC//CwKv879K+pBIf0sC3IuL4Gj0/9AK2Ar5IXq9hpRJl8fuI+K67hCQ1hyeW7WF3t6XUNgL4Y0ppCPCLiJjSzYIQwJ7Az4ERJc3iOymlByPibHcLSWrOC5JKLqX0BOV6l1DSnLkD+HpEPNKF54MhwC7A/sAqbZDBW8DSETHG3UGSLArqWknYELjTJKS29i/g7+SZgZ7ueLNzMfRwJWAL4OPAhkDfNnv8u0XEue4GktRYDlcpv72MQGp7mxQf08rBu+R7GfoD/Srw+LcALAqSZFHQnEopDQR2NQmpcuav2ONd1E0uSY3nyszl9iXygkeS1M76GYEkWRTUNXsYgaQKmGgEkmRR0BxKKS1LHrcrSe3uSSOQJIuC5tzuOGuVJIuCJKlOPNEsoZRSb+BlvMFPUjUsExEvGIMkNZZXFMrpE5YESRXxuCVBkiwK+v/t3XuQnXVhxvHvQ2hAuZoCgmUGRXSqtIRUQNAJBbmIhVCgQIFWELFNL3a4TKdVW4bai6OtWGAqg9QyRZhRQKkFnHIRKIJgIlCIjSQkIUEIJCRAICSYwObXP96TdrskYZO9nN97zvczc2bPefeQmTy7Q97n/G7D5yJmSf3iu0YgSd3h1KOWKaXsAiwGJpqGpB73OrB3kqeMQpLGnyMK7XOaJUFSn7jFkiBJFgUNn9OOJPWLi41AkiwKGoZSyv7AgSYhqQ/clOQ+Y5Aki4KG5+NGIKkPDAAXGYMkWRQ0DKWUbYCzTEJSH7gkySPGIEkWBQ3PNOAXjUFSj3sa+LwxSJJFQcN3thFI6gPnJVlpDJLUfZ6j0AKllN2Bp4CtTUNSD7sjydHGIEl1cEShHc62JEjqcUtwwwZJsiho+EopwWlHknr8f3XA2UmWGoUkWRQ0fAcD7zEGST3siiS3GoMkWRS0eRxNkNTLZgLnG4Mk1cfFzBUrpewAPANsbxqSetBDwJFJVhiFJNXHEYW6nTCoJLwCvGwkknrEXOA4S4IkWRS0ZT7V+boKuBDY0Ugk9YDZwNQkS4xCkiwK2kyllH2AqcAa4ETgAFOR1AMeA45IsswoJMmioC3zCWAdcAbwU+BUI5HUcguAo90GVZIsCtpCpZQJwFnAp5PcCEwHfsFkJLXYQuDwJE8bhSS1g7se1VkUjqSZv3tRKWUisAjYw2QktdRS4LAkc4xCktrDEYU6rUpyUef5SZYESS22jGZNgiVBklrGEYXKlVLuBw4xCUkt9AzNdKPHjUKSLAoa3ZLwAeBBk5BkSZAkjTenHtXtD41AUgsttiRIUvs5olCpUsqONJ/IbWcaklrkOeAjSWYbhSS129ZGUK3ftSRIapn1IwnzjEKS2s+pR/X6pBFIsiRIkrrFqUcVKqXsD/yXSUhqiadpphtZEiSphziiUKczjUBSSzwLHG1JkKTe44hCZUop29IM4U8yDUmVm09zmNrPjEKSeo8jCvU51pIgqQWeAI6yJEiSRUHjx2lHkmo3CzgkySKjkKTe5dSjipRSJtHM951oGpIqNRM4JsmLRiFJvc0RhbqcaEmQVLEHaBYuWxIkyaKgcXa6EUiq1F00axJeMgpJ6g9OPapEKWU3mt2OPC1bUm1+AByXZKVRSFL/cEShHqdZEiRV6Ds0IwmWBEmyKKhLTjQCSZW5HjgjyVqjkKT+49SjCnR2O3oOmGAakipxCXBBkmIUktSfHFGow1GWBEkV+UyS8y0JkmRRUPd9zAgkVaAAf57kS0YhSXLqUbf/VS5lK2AJsKtpSOqidcAfJfmaUUiSwF12arC/JUFSlw0Av5/kKqOQJFkU6nGMEUjqoleB05P8u1FIkiwKdfnwkNfLgInATkYjaYytAI5Pcq9RSJKGcjFzF5VSAhw85PIXLAmSxsEC4ABLgiTJolCnvYFJg17fDcwzFkljbC5wRJIFRiFJsijU6f2Dnr8K/B5wkLFIGkP3AgcnedIoJEkWhXq9d9Dzv+t8umdRkDRWvg0clWSFUUiSLAp126Pz9XHgy53nHzAWSWPgGuCMJGuMQpJkUajf9jQnoZ6TZE0pZU88U
0HS6CrAeUnOTPKacUiShsvtUbtf1K5Ncl/n9a8YiaRRNAB8OskVRiFJsii0y4M0c4bX289IJI2Sl4GTktxpFJIki0L7/EuSgUGv9zUSSaNgGTAtyQyjkCRZFFpoSEkAmGwqkkZoFvAbSRYbhSRpRPeqRlCHUsrWwEpgW9OQtIVm0IwkLDMKSdJIuetRPd5pSZA0AtcAh1oSJEkWhd7zbiOQtAUK8BngrCRrjUOSNFpco1CPvY1A0mb6eacgXG8UkiSLgkVBkqDZ2eiEJPcbhSTJotDbnHokabjmAscmWWAUkqSx4hqFerzLCCQNw0zgMEuCJMmi0D/2MQJJb+IqYGqSJUYhSbIo9IFSyiRge5OQtBHrgL8APuXORpKk8eIahTrsZgSSNuJF4LQktxuFJMmi0H92NQJJG/AYcHyS+UYhSRpvTj2yKEiq083ABy0JkiSLQn/bxQgkDXIF8FtJVhqFJKlbnHpUB9coSAJYTbNg+ZtGIUmyKAiceiQJFgInJnnUKCRJNXDqkUVBUvfdDhxoSZAkWRQ01CQjkPrWPwHHJXneKCRJNXHqUR3eYgRS31kFnJPkOqOQJFkUtDHbGIHUVxYCJyd52CgkSbVy6lEdtjUCqW9cB0y2JEiSLAoajolGIPW8AeBzwOmejyBJagOnHtVzAyGpdz0JnJLkx0YhSWoLRxTq8LoRSD3rduAAS4IkyaKgLfGaEUg9pwAX02x9utw4JElt49SjOqw1AqmnLAc+keR7RiFJsihoJFYYgdQzbgXOTLLMKCRJbebUozq8YIGQWm8d8LfANEuCJMmioNGyofnLBbjFaKRWWAgckuTCJG5OIEmyKGhMi8JsYJHRSNW7vVMSZhqFJMmioNE2fwPXbgNeMRqpWquBs5J8NMlS45AkWRQ0Fh7fwLW7OjcikuozDzg0yTeMQpJkUdBYmkuzEHK9NcB/Ai6IlOpSgMuA/ZI8ZBySJIuCxlSSV4EnBl2akWQ1sMR0pGq8BJyR5NwkPzcOSZJFQePl3kHPv9/5alGQ6nAPMCXJt4xCkmRR0Hi7e9Dz2zpfnzUWqatW0ixYPizJQuOQJPWTGEEdSinvpNmL/SVgl/V7sZdSVgFvNSFp3P0QODvJPKOQJPUjRxRqaWzJIprpDXcOObBpvulI4+oVYDow1ZIgSepnWxtBVb4O7DDk2lxgP6ORxsXdwCc7xV2SJIuCqnEjsOuQa36iKY29NcBfAf+QZMA4JEmyKFSlsyXqk0MuzzEZaUzdQzOK8IRRSJL0f1yjUL/HjUAaE6tp1iIcbkmQJOmN3PWocqWUnYEX/FlJo+pRmm1PHzUKSZI2zBGF2ptcsgKnH0mjZTXwWeBAS4IkSRaFXvBDI5BG7BpgnyRfTPKacUiSZFHoBQ8YgbTFFgHTkpyZxNPOJUmyKFgUpD63Fvg8sG+SW4xDkqTN4wLZFiilBFgOTDINadjl+g+SzDIKSZK2jCMKbWhzSQF+bBLSm3oFuAA41JIgSZJFoV/cZgTSJq1frPyPSV43DkmSRsapRy1RSvll4DGTkN7gZ8CfJLnJKCRJGj2OKLSl0SVzgHkmIf2vl4DzaEYRLAmSJFkU+po3QxIUmmlG+ya51DMRJEkaG049atPdUSmTgUdMQn3sVuCCJE7DkyTJoqAhZWEW8KsmoT6zADg/yc1GIUnS+HDqUft80wjUR1YBFwH7WRIkSRpfjii0TCnlbcBTwHamoR42AHwV+OskzxuHJEnjzxGFtjW75EXgOpNQD7sf+HCScy0JkiR18b7TCNqnlLIfzaJmf37qJY8AFya5xSgkSeo+RxTa2O6SWcCNQy7fZzJqqTnAbwO/ZkmQJMmioJH7LDB4//jLaU6oldpiLnAq8P4k1ycpRiJJkkVBI5RkHnD1oEt7AdNpDqOSavYM8Mc0OxndYEGQJMmioNH3OWB55/mUJLcClxmLKrUc+FNgnySXJ1lrJJIk1cvFsC1XSvk48A1gXpL3llIm0qxXONB0VIkVwFeAS5KsNA5JkiwKGr+y8B/AR4Gdk7xcStkbeBjYyXTURU8DlwBXWhAkSWofpx71hjOAecAUgCRPAL+D6xXUHTOA44G9klxsSZAkyaKgLukcwnY88K5B175H82muNB4KcAswNcnBSW5Oss5YJElq8T2mEfTQnVop2yVZNej1tjSn3E4xHY2RdcBNwN8necA4JEmyKKg95eE9wEPADqahUbQGuBb4cpI5xiFJkkVB7SwLJwDfwalmGrnFwNeAf06yxDgkSbIoqP1l4c+AL5mEtsAAcCNwJXCXaw8kSbIoqPfKwrU0uyFJw/E88HWa0YMFxiFJkkVBvVsUtgPuZdOLm78NTAXebmJ9ay5wOXB1kpeMQ5Kk/uSc9X5qhc2OSMcCizbxttnAr9MclqX+sYZmHcvRwPuSXGZJkCTJoqD+KgvPAh8BlmziPXOBD9Ec4qbeNUBz9sGpwNuSnJzkjiQe1CdJkiwKfVoWFtJ8crxiE+95imYK0k9MrOc8CpxHc3LytCQ3JHnVWCRJkkVBJPkJcBKwchPvWdopFP9tYq33MnAVcBgwJcmlSRYbiyRJsihoQ0XgbpppSMs28Z4lwOHAwybWOqtoFqefDuye5Jwk9zi1SJIkWRQ0nLLwIHAosHAT71kOHAH8yMSq9zzwr8BvArsmOSXJt5xaJEmSNvs+0QgEUErZkeZAtqeSfGEj79ke+DfgSBOrytPAdzs/mx8ked1IJEmSRUGjXRi22tTJu6WUbYBrgFNMq2vWAQ8Bd3QKwoNOJ5IkSRYF1VAmJgBfBaZv5C3PAnuY1Kha2CkG3wfuTPKCkUiSJIuCai0MfwP85Qa+NR2YCZwFnAbsblqbbQVw1/pykGS+kUiSJIuC2lQWzgW+wv9fGD89yZWd708AjgJOBo4BfsnU3mAd8Bgwo/P4ETA7yYDRSJKkbtnaCDSipplcWkpZSrNH/1s28P0B4NbOg1LKZOBjnceH+vR38LlBpWAGMDPJy/42SZKkqu7zjECjoZRyEM2uO+9g0IjCm/w3O3fKwkHABztfJ/VQLKuBn9Kcbj0bmEUzUvCMvzGSJMmioH4qC+8ArgZuGE5R2MifsRewPzC583gfsDewTaV/7TXAk8CiQY85nXLwxKZ2kJIkSbIoqJ/KQoDdkiwdxT9zK2BP4N2dx540Ixd7DHq8HZgwin+VlTQnVi8Dlg96PAcs7hSChcCzbk0qSZIsClLdJWUisB2wE816ibcCO7/J7/lK4DWaXYZeA14BVidZY6KSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJEmSJElt8D/476Mu0neorwAAAABJRU5ErkJggg=='
mail = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Alarm from RedELK</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<style type="text/css">
#normal {
font-family: Tahoma, Geneva, sans-serif;
font-size: 16px;
line-height: 24px;
}
</style>
</head>
<body style="margin: 0; padding: 0;">
<table align="center" cellpadding="0" cellspacing="0" width="800" style="border-collapse: collapse;" style="max-width:800px;">
<tr>
<td bgcolor="#212121" rowspan=2 width="120px" style="padding: 30px 30px 30px 30px; text-align:center;">
<img height="60px" src="data:image/png;base64,%s" alt="img" />
</td>
<td bgcolor="#212121" height="30px" style="color: #FAFAFA; font-family: Arial, sans-serif; font-size: 24px; padding: 30px 30px 0px 10px;">
RedELK alarm: <em>%s</em>
</td>
</tr>
<tr>
<td bgcolor="#212121" height="20px" style="color: #FAFAFA; font-family: Arial, sans-serif; font-size: 16px; line-height: 20px; padding: 20px 30px 30px 10px;">
Total hits: <em>%d</em>
</td>
</tr>
<tr>
<td colspan=2 style="color: #153643; font-family: Arial, sans-serif; font-size: 16px; line-height: 20px; padding: 0px 30px 0px 10px;">
<p>%s</p>
</td>
</tr>
''' % (img, alarm['info']['name'], alarm['hits']['total'], alarm['info']['description'])
subject = 'Alarm from %s [%s hits]' % (alarm['info']['name'], alarm['hits']['total'])
if len(alarm['groupby']) > 0:
mail += '''
<tr>
<td colspan=2 style="color: #153643; font-family: Arial, sans-serif; font-size: 12px; line-height: 16px; padding: 0px 15px 0px 15px;">
<p>Please note that the items below have been grouped by: %s</p>
</td>
</tr>
''' % pprint(alarm['groupby'])
try:
for hit in alarm['hits']['hits']:
i = 0
title = hit['_id']
while i < len(alarm['groupby']):
if i == 0:
title = getValue('_source.%s' % alarm['groupby'][i], hit)
else:
title = '%s / %s' % (title, getValue('_source.%s' % alarm['groupby'][i], hit))
i += 1
mail += '''
<tr>
<td bgcolor="#323232" colspan=2 style="color: #FAFAFA; font-family: Arial, sans-serif; font-size: 16px; line-height: 20px; padding: 10px 10px 10px 10px; text-align:center;">
<b>%s</b>
</td>
</tr>
''' % title
r = 0
for field in alarm['fields']:
bgcolor = '#FAFAFA' if r % 2 == 0 else '#F1F1F1'
val = getValue('_source.%s' % field, hit)
value = json2html.convert(json = val)
mail += '''
<tr bgcolor="%s" style="color: #153643; font-family: Arial, sans-serif; font-size: 12px; line-height: 16px;">
<td style="padding: 10px 10px 10px 10px;"><b>%s</b></td>
<td style="padding: 10px 10px 10px 10px; white-space:pre-wrap; word-wrap:break-word">%s</td>
</tr>
''' % (bgcolor, field, value)
r += 1
mail += '<tr><td colspan=2 style="padding: 15px;"> </td></tr>'
mail += '</table>\n</body>\n</html>'
        except Exception as e:
            self.logger.error('Error building alarm email body: %s' % e)
            self.logger.exception(e)
            mail += '</table>\n</body>\n</html>'
#self.logger.debug('Sending email: %s' % mail)
smtpResp = self.SendMail(notifications['email']['to'], mail, subject)
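For reference, a minimal sketch (not part of the original module) of the alarm structure the template code above consumes; the keys mirror the lookups in the code, while all values here are invented placeholders.

# Hypothetical alarm dict, for illustration only.
example_alarm = {
    'info': {'name': 'Example alarm', 'description': 'Illustrative description text'},
    'groupby': ['source.ip'],                      # fields used to build the per-hit title
    'fields': ['source.ip', 'http.request.body'],  # fields rendered as table rows per hit
    'hits': {
        'total': 1,
        'hits': [
            {'_id': 'abc123', '_source': {'source': {'ip': '203.0.113.7'}}},
        ],
    },
}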
| 391.358382
| 60,253
| 0.920641
|
97598790b59ac25684c5914151a3314045835995
| 5,798
|
py
|
Python
|
wandb/__init__.py
|
tgisaturday/client
|
9c3274cf6035636e0e05ce7e1d869bb2f5e03482
|
[
"MIT"
] | 1
|
2020-09-16T19:06:01.000Z
|
2020-09-16T19:06:01.000Z
|
wandb/__init__.py
|
ashzblum/client
|
768ca0b40be3bbd58fc7bfe6211c06ca07e1b216
|
[
"MIT"
] | null | null | null |
wandb/__init__.py
|
ashzblum/client
|
768ca0b40be3bbd58fc7bfe6211c06ca07e1b216
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Wandb is a library to help track machine learning experiments.
For more information on wandb see https://docs.wandb.com.
The most commonly used functions/objects are:
- wandb.init — initialize a new run at the top of your training script
- wandb.config — track hyperparameters
- wandb.log — log metrics over time within your training loop
- wandb.save — save files in association with your run, like model weights
- wandb.restore — restore the state of your code when you ran a given run
For example usage, see github.com/wandb/examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__version__ = "0.11.3.dev1"
# Used with pypi checks and other messages related to pip
_wandb_module = "wandb"
import sys
from wandb.errors import Error
# This needs to be early as other modules call it.
from wandb.errors.term import termsetup, termlog, termerror, termwarn
from wandb import sdk as wandb_sdk
import wandb
wandb.wandb_lib = wandb_sdk.lib
init = wandb_sdk.init
setup = wandb_sdk.setup
save = wandb_sdk.save
watch = wandb_sdk.watch
unwatch = wandb_sdk.unwatch
finish = wandb_sdk.finish
join = finish
login = wandb_sdk.login
helper = wandb_sdk.helper
sweep = wandb_sdk.sweep
controller = wandb_sdk.controller
require = wandb_sdk.require
Artifact = wandb_sdk.Artifact
AlertLevel = wandb_sdk.AlertLevel
Settings = wandb_sdk.Settings
Config = wandb_sdk.Config
from wandb.apis import InternalApi, PublicApi
from wandb.errors import CommError, UsageError
_preinit = wandb_lib.preinit
_lazyloader = wandb_lib.lazyloader
from wandb import wandb_torch
# Move this (keras.__init__ expects it at top level)
from wandb.data_types import Graph
from wandb.data_types import Image
from wandb.data_types import Plotly
# from wandb.data_types import Bokeh # keeping out of top level for now since Bokeh plots have poor UI
from wandb.data_types import Video
from wandb.data_types import Audio
from wandb.data_types import Table
from wandb.data_types import Html
from wandb.data_types import Object3D
from wandb.data_types import Molecule
from wandb.data_types import Histogram
from wandb.data_types import Classes
from wandb.data_types import JoinedTable
from wandb.wandb_agent import agent
from wandb import superagent
# from wandb.core import *
from wandb.viz import visualize
from wandb import plot
from wandb import plots # deprecating this
from wandb.integration.sagemaker import sagemaker_auth
# Used to make sure we don't use some code in the incorrect process context
_IS_INTERNAL_PROCESS = False
def _set_internal_process(disable=False):
global _IS_INTERNAL_PROCESS
if _IS_INTERNAL_PROCESS is None:
return
if disable:
_IS_INTERNAL_PROCESS = None
return
_IS_INTERNAL_PROCESS = True
def _assert_is_internal_process():
if _IS_INTERNAL_PROCESS is None:
return
assert _IS_INTERNAL_PROCESS
def _assert_is_user_process():
if _IS_INTERNAL_PROCESS is None:
return
assert not _IS_INTERNAL_PROCESS
# toplevel:
# save()
# restore()
# login()
# sweep()
# agent()
# globals
Api = PublicApi
api = InternalApi()
run = None
config = _preinit.PreInitCallable(
_preinit.PreInitObject("wandb.config"), wandb_sdk.wandb_config.Config
)
summary = _preinit.PreInitCallable(
_preinit.PreInitObject("wandb.summary"), wandb_sdk.wandb_summary.Summary
)
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
save = _preinit.PreInitCallable("wandb.save", wandb_sdk.wandb_run.Run.save)
restore = wandb_sdk.wandb_run.restore
use_artifact = _preinit.PreInitCallable(
"wandb.use_artifact", wandb_sdk.wandb_run.Run.use_artifact
)
log_artifact = _preinit.PreInitCallable(
"wandb.log_artifact", wandb_sdk.wandb_run.Run.log_artifact
)
define_metric = _preinit.PreInitCallable(
"wandb.define_metric", wandb_sdk.wandb_run.Run.define_metric
)
mark_preempting = _preinit.PreInitCallable(
"wandb.mark_preempting", wandb_sdk.wandb_run.Run.mark_preempting
)
plot_table = _preinit.PreInitCallable(
"wandb.plot_table", wandb_sdk.wandb_run.Run.plot_table
)
alert = _preinit.PreInitCallable("wandb.alert", wandb_sdk.wandb_run.Run.alert)
# record of patched libraries
patched = {"tensorboard": [], "keras": [], "gym": []}
keras = _lazyloader.LazyLoader("wandb.keras", globals(), "wandb.integration.keras")
sklearn = _lazyloader.LazyLoader("wandb.sklearn", globals(), "wandb.sklearn")
tensorflow = _lazyloader.LazyLoader(
"wandb.tensorflow", globals(), "wandb.integration.tensorflow"
)
xgboost = _lazyloader.LazyLoader(
"wandb.xgboost", globals(), "wandb.integration.xgboost"
)
tensorboard = _lazyloader.LazyLoader(
"wandb.tensorboard", globals(), "wandb.integration.tensorboard"
)
gym = _lazyloader.LazyLoader("wandb.gym", globals(), "wandb.integration.gym")
lightgbm = _lazyloader.LazyLoader(
"wandb.lightgbm", globals(), "wandb.integration.lightgbm"
)
docker = _lazyloader.LazyLoader("wandb.docker", globals(), "wandb.docker")
jupyter = _lazyloader.LazyLoader("wandb.jupyter", globals(), "wandb.jupyter")
sacred = _lazyloader.LazyLoader("wandb.sacred", globals(), "wandb.integration.sacred")
def ensure_configured():
global api
api = InternalApi()
def set_trace():
import pdb # TODO: support other debuggers
# frame = sys._getframe().f_back
pdb.set_trace() # TODO: pass the parent stack...
__all__ = [
"__version__",
"init",
"setup",
"save",
"sweep",
"controller",
"agent",
"config",
"log",
"summary",
"join",
"Api",
"Graph",
"Image",
"Plotly",
"Video",
"Audio",
"Table",
"Html",
"Object3D",
"Molecule",
"Histogram",
"_enable",
]
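A minimal usage sketch of the functions listed in the module docstring above; the project name and the logged metric are illustrative, not taken from this file.

import wandb

run = wandb.init(project="my-project")      # start a new run (project name is an example)
wandb.config.learning_rate = 0.01           # track a hyperparameter
for step in range(10):
    wandb.log({"loss": 1.0 / (step + 1)})   # log a metric over time
run.finish()                                # equivalent to wandb.finish()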
| 26.842593
| 102
| 0.754053
|
8ef9b278c59b97d06d0eddc028c98fd6bf7ef957
| 954
|
py
|
Python
|
pyinterview/tries.py
|
jodahoney/pyinterview
|
58662b569edf077fbea2cda8e2a045d5da55c7bf
|
[
"MIT"
] | 2
|
2021-08-20T22:28:26.000Z
|
2021-10-03T06:11:23.000Z
|
pyinterview/tries.py
|
jodahoney/pyinterview
|
58662b569edf077fbea2cda8e2a045d5da55c7bf
|
[
"MIT"
] | 15
|
2021-10-01T00:26:48.000Z
|
2021-10-07T21:19:23.000Z
|
pyinterview/tries.py
|
jodahoney/pyinterview
|
58662b569edf077fbea2cda8e2a045d5da55c7bf
|
[
"MIT"
] | 5
|
2021-10-02T22:20:45.000Z
|
2021-10-04T05:47:45.000Z
|
class TrieNode:
def __init__(self, letter=None):
self.letter = letter
self.children = {}
self.is_end_of_word = False
class Trie:
def __init__(self):
self.root = TrieNode("*")
def add_word(self, word: str) -> None:
itr = self.root
for letter in word:
if letter not in itr.children:
itr.children[letter] = TrieNode(letter)
itr = itr.children[letter]
itr.is_end_of_word = True
def search(self, word: str) -> bool:
itr = self.root
for letter in word:
if letter not in itr.children:
return False
itr = itr.children[letter]
return itr.is_end_of_word
def starts_with(self, word: str) -> bool:
itr = self.root
for letter in word:
if letter not in itr.children:
return False
itr = itr.children[letter]
return True
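A short usage sketch of the Trie above; the words are arbitrary examples.

trie = Trie()
trie.add_word("apple")
trie.add_word("app")

print(trie.search("app"))        # True  -- "app" was added as a complete word
print(trie.search("appl"))       # False -- only a prefix of a stored word
print(trie.starts_with("appl"))  # True  -- some stored word starts with "appl"
print(trie.search("banana"))     # False -- never added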
| 27.257143
| 55
| 0.550314
|
72a99777dea025608390e4d0e81f8afe0eef7bc5
| 809
|
py
|
Python
|
Lib/corpuscrawler/crawl_kac.py
|
cash/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 95
|
2019-06-13T23:34:21.000Z
|
2022-03-12T05:22:49.000Z
|
Lib/corpuscrawler/crawl_kac.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 31
|
2019-06-02T18:56:53.000Z
|
2021-08-10T20:16:02.000Z
|
Lib/corpuscrawler/crawl_kac.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 35
|
2019-06-18T08:26:24.000Z
|
2022-01-11T13:59:40.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='kac')
crawl_bibleis(crawler, out, bible='KACUBS')
| 36.772727
| 74
| 0.770087
|
e99ce57d03981b064dd42f946e806317b9ac26a5
| 5,340
|
py
|
Python
|
src/data/yahoofinance.py
|
philippschmalen/Alternative-ESG-data-codebase
|
bf8b1de3ff4e2deebc206270ce81cc8bc94028a8
|
[
"MIT"
] | null | null | null |
src/data/yahoofinance.py
|
philippschmalen/Alternative-ESG-data-codebase
|
bf8b1de3ff4e2deebc206270ce81cc8bc94028a8
|
[
"MIT"
] | 1
|
2021-06-16T15:37:21.000Z
|
2021-06-16T15:37:21.000Z
|
src/data/yahoofinance.py
|
philippschmalen/Alternative-ESG-data-codebase
|
bf8b1de3ff4e2deebc206270ce81cc8bc94028a8
|
[
"MIT"
] | 1
|
2021-05-22T14:26:02.000Z
|
2021-05-22T14:26:02.000Z
|
"""
Retrieve firm-level esg scores, process firm names and construct query strings
"""
from yahooquery import Ticker
from pytickersymbols import PyTickerSymbols
import logging
import numpy as np
import pandas as pd
import yaml
# ---------------------------------------------------
# INDEX DETAILS
# ---------------------------------------------------
def get_index_stock_details(pytickersymbols, index_name):
"""Get firm name, stock ticker for a specified stock index.
Available indices from pytickersymbols: PyTickerSymbols().get_all_indices()
See https://github.com/portfolioplus/pytickersymbols for package details
Args:
pytickersymbols (object): Init object from PyTickerSymbols()
index_name (str): Index name from PyTickerSymbols().get_all_indices()
Returns:
        Dataframe: index constituents with firm name, symbols and yahoo_ticker (also set as the index)
"""
index_details = pd.DataFrame(pytickersymbols.get_stocks_by_index(index_name))
# string encoding
try:
index_details.name = index_details.name.str.encode("latin-1").str.decode(
"utf-8"
)
except Exception:
logging.warning(f"Encoding error for {index_name}")
index_details.name = index_details.name.str.encode("utf-8").str.decode("utf-8")
# retrieve yahoo ticker symbol
index_details["yahoo_ticker"] = index_details.symbols.apply(
lambda x: x[0]["yahoo"] if len(x) > 1 else np.nan
)
index_details.yahoo_ticker.fillna(index_details.symbol, inplace=True)
# set ticker as index
index_details.set_index("yahoo_ticker", inplace=True, drop=False)
return index_details
# ---------------------------------------------------
# FIRM-LEVEL ESG DATA
# ---------------------------------------------------
def get_esg_details(yahoo_ticker):
"""Returns esg information for suitable yahoo ticker which can be string, pd.Series or list"""
# convert series to list
if isinstance(yahoo_ticker, pd.Series):
yahoo_ticker = yahoo_ticker.to_list()
ticker_details = Ticker(yahoo_ticker)
esg_df = pd.DataFrame(ticker_details.esg_scores).T
return esg_df
def get_index_firm_esg(pytickersymbols, index_name):
"""Merge index, firm name and esg data"""
index_stocks = get_index_stock_details(
pytickersymbols=pytickersymbols, index_name=index_name
)
esg_details = get_esg_details(yahoo_ticker=index_stocks.yahoo_ticker)
stocks_esg = pd.concat([index_stocks, esg_details], axis=1)
return stocks_esg
def replace_firm_names(df, settings_path):
"""Replace firm names as specified in settings.yaml"""
with open(settings_path, encoding="utf8") as file:
settings = yaml.safe_load(file)
try:
settings["query"]["firm_names"]
except Exception:
        logging.warning(
            "No firm names specified in settings['query']['firm_names']. "
            "Firm names still contain the legal suffix, which compromises search results."
        )
assert (
"name" in df.columns
), "Dataframe has no name column. Firm names cannot be replaced."
replace_firm_names = settings["query"]["firm_names"]
df["firm_name"] = df.name.replace(replace_firm_names, regex=True).str.strip()
return df
def remove_missing_esg_firms(esg_df, missing_placeholder="No fundamentals data"):
"""Drops firms that have no ESG scores. Placeholder from Yahoo"""
return esg_df.loc[~esg_df.peerGroup.str.contains(missing_placeholder)]
def get_esg_controversy_keywords(settings_path):
"""Load controversy keywords from settings.yaml"""
with open(settings_path, encoding="utf8") as file:
settings = yaml.full_load(file)
controversies = settings["esg"]["negative"]
return controversies
def create_query_keywords(esg_df, keyword_list, explode=True):
"""Construct query keywords from firm_name and a list of keywords
Args:
esg_df (Dataframe): Data from yahooquery Ticker(yahoo_ticker).esg_scores, processed firm names
keyword_list (list): list of strings that are attached to each firm name
        explode (boolean): If true, re-shapes to long format with each row having a unique query_keyword
Returns:
Dataframe: added query_keyword column (firm_name + keyword)
"""
esg_df["query_keyword"] = esg_df.firm_name.apply(
lambda x: [x + kw for kw in keyword_list]
)
if explode:
return esg_df.explode(column="query_keyword")
else:
return esg_df
def esg_firm_query_keywords_pipeline(index_name, path_to_settings):
"""ESG scores, processed firm names and firm name query strings in a dataframe.
Args:
index_name (string): Index name, one of PyTickerSymbols().get_all_indices()
path_to_settings (string): path to settings.yaml, where all esg keywords are specified
Returns:
Dataframe: esg scores and related data from Yahoo!Finance incl. processed firm names and query keywords
"""
pytickersymbols = PyTickerSymbols()
controversy_keywords = get_esg_controversy_keywords(path_to_settings)
esg_df = (
get_index_firm_esg(pytickersymbols=pytickersymbols, index_name=index_name)
.pipe(replace_firm_names, settings_path=path_to_settings)
.pipe(remove_missing_esg_firms)
.pipe(create_query_keywords, keyword_list=controversy_keywords)
)
return esg_df
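A hedged usage sketch of the pipeline above; the index name must be one of PyTickerSymbols().get_all_indices(), and the settings path below is an assumed local file containing the query.firm_names and esg.negative entries the helpers expect.

if __name__ == "__main__":
    esg_keywords = esg_firm_query_keywords_pipeline(
        index_name="DAX",                         # example index name
        path_to_settings="config/settings.yaml",  # hypothetical settings file
    )
    print(esg_keywords[["firm_name", "query_keyword"]].head())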
| 32.363636
| 111
| 0.687079
|
cb4a9d0d0e835f5ce9e9b9d013002a66721d67e9
| 624
|
py
|
Python
|
conf_dev.py
|
tang3/DarkNet_ChineseTrading
|
0d2d4c998fe7527ee5d0276d05e79b20e915873a
|
[
"MIT"
] | 1
|
2019-06-28T02:23:49.000Z
|
2019-06-28T02:23:49.000Z
|
conf_dev.py
|
tang3/DarkNet_ChineseTrading
|
0d2d4c998fe7527ee5d0276d05e79b20e915873a
|
[
"MIT"
] | null | null | null |
conf_dev.py
|
tang3/DarkNet_ChineseTrading
|
0d2d4c998fe7527ee5d0276d05e79b20e915873a
|
[
"MIT"
] | null | null | null |
class ConfigDev:
debug = True
mysql_host = "1.2.3.4"
mysql_port = 3306
mysql_usr = "root"
mysql_pass = ""
mysql_db = "db"
redis_host = "127.0.0.1"
redis_port = 6379
telegram_proxy = None
telegram_token = "xxxxxxxxxxxxxxxxxxx"
darknetchannelID = "-100000000000"
ReportGroupID = "-100000000001"
tor_proxy = "socks5h://127.0.0.1:9150"
sendForTest = False
filterArea = (
"其它类别",
"卡料-CVV",
"基础知识",
"实体物品",
"影视-色情",
"技术-教学",
"数据-情报",
"服务-接单",
"私人专拍",
"虚拟资源",
)
Config = ConfigDev
| 17.828571
| 42
| 0.522436
|
38d2421bb4f2fd91abd7226c3c7551fe3d81a447
| 374
|
py
|
Python
|
helixalm/__init__.py
|
tcrory/helixalm
|
a8a4fe7a5f845617f62d923974309228a0c412f5
|
[
"MIT"
] | null | null | null |
helixalm/__init__.py
|
tcrory/helixalm
|
a8a4fe7a5f845617f62d923974309228a0c412f5
|
[
"MIT"
] | 1
|
2018-12-18T13:43:54.000Z
|
2018-12-18T13:55:21.000Z
|
helixalm/__init__.py
|
tcrory/helixalm
|
a8a4fe7a5f845617f62d923974309228a0c412f5
|
[
"MIT"
] | null | null | null |
"""
Helix ALM API Python Wrapper
helixalm is a python package that allows for simple access to Helix ALM's REST API.
"""
import sys
# Enforce Python 3.6+
if sys.version_info.major != 3 or sys.version_info.minor < 6:
raise ImportError('Helix ALM package requires Python 3.6 or greater.')
# Package level imports.
from .const import __version__
from .helix import Helix
| 23.375
| 83
| 0.748663
|
9b567d9c09ea25ef1fb4893ec006417cc777f574
| 3,761
|
py
|
Python
|
app/uis/chat/message.py
|
CNC-Messenger/Desktop-Client
|
8b3fc7b3bad78d8dbfcf806993f797c3f8932c51
|
[
"MIT"
] | 86
|
2021-05-27T05:47:10.000Z
|
2022-03-13T06:31:47.000Z
|
app/uis/chat/message.py
|
CNC-Messenger/Desktop-Client
|
8b3fc7b3bad78d8dbfcf806993f797c3f8932c51
|
[
"MIT"
] | 2
|
2021-05-26T19:00:41.000Z
|
2021-06-29T13:17:38.000Z
|
app/uis/chat/message.py
|
CNC-Messenger/Desktop-Client
|
8b3fc7b3bad78d8dbfcf806993f797c3f8932c51
|
[
"MIT"
] | 38
|
2021-05-31T16:12:04.000Z
|
2022-03-24T09:49:56.000Z
|
# ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
# DEFAULT PACKAGES
# ///////////////////////////////////////////////////////////////
import os
# IMPORT / GUI, SETTINGS AND WIDGETS
# ///////////////////////////////////////////////////////////////
# Packages
from datetime import datetime
from app.packages.pyside_or_pyqt import * # Qt
# GLOBALS
send_by = None
# MAIN WINDOW
# ///////////////////////////////////////////////////////////////
class Message(QWidget):
def __init__(self, message, me_send):
QWidget.__init__(self)
global send_by
send_by = me_send
self.setMinimumHeight(20)
self.setup_ui()
self.setFixedHeight(self.layout.sizeHint().height())
# SET MESSAGE
self.message.setText(message)
# SET DATE TIME
date_time = datetime.now()
date_time_format = date_time.strftime("%m/%d/%Y %H:%M")
self.data_message.setText(str(date_time_format))
def setup_ui(self):
# LAYOUT
self.layout = QHBoxLayout(self)
self.layout.setContentsMargins(0,0,0,0)
# FRAME BG
self.bg = QFrame()
if send_by:
self.bg.setStyleSheet("#bg {background-color: #0e0e0f; border-radius: 10px; margin-left: 150px; } #bg:hover { background-color: #252628; }")
else:
self.bg.setStyleSheet("#bg {background-color: #28282b; border-radius: 10px; margin-right: 150px; } #bg:hover { background-color: #252628; }")
self.bg.setObjectName("bg")
# FRAME BG
self.btn = QPushButton()
self.btn.setMinimumSize(40, 40)
self.btn.setMaximumSize(40, 40)
self.btn.setStyleSheet("""
QPushButton {
background-color: transparent;
border-radius: 20px;
background-repeat: no-repeat;
background-position: center;
background-image: url(:/icons_svg/images/icons_svg/icon_more_options.svg);
}
QPushButton:hover {
background-color: rgb(61, 62, 65);
}
QPushButton:pressed {
background-color: rgb(16, 17, 18);
}
""")
if send_by:
self.layout.addWidget(self.bg)
self.layout.addWidget(self.btn)
else:
self.layout.addWidget(self.btn)
self.layout.addWidget(self.bg)
# LAYOUT INSIDE
self.layout_inside = QVBoxLayout(self.bg)
        self.layout_inside.setContentsMargins(10, 10, 10, 10)
# LABEL MESSAGE
self.message = QLabel()
self.message.setText("message test")
self.message.setStyleSheet("font: 500 11pt 'Segoe UI'")
self.message.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.TextSelectableByKeyboard)
# LABEL MESSAGE
self.data_message = QLabel()
self.data_message.setText("date")
self.data_message.setStyleSheet("font: 8pt 'Segoe UI'; color: #4c5154")
if send_by:
self.data_message.setAlignment(Qt.AlignRight)
else:
self.data_message.setAlignment(Qt.AlignLeft)
self.layout_inside.addWidget(self.message)
self.layout_inside.addWidget(self.data_message)
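A minimal sketch of embedding the widget above in a window; it assumes the Qt names (QApplication, QWidget, QVBoxLayout) are available via the star import at the top of the module, and the icon resource may simply not render outside the full app.

if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QWidget()
    demo_layout = QVBoxLayout(window)
    demo_layout.addWidget(Message("Hello from me", me_send=True))    # sender-style bubble
    demo_layout.addWidget(Message("Hello back", me_send=False))      # receiver-style bubble
    window.show()
    sys.exit(app.exec())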
| 33.882883
| 153
| 0.578835
|
b73c37e09180d9b145a7e1dcf4b27f2e3a0b1095
| 1,348
|
py
|
Python
|
orders/models.py
|
faizalazman/Bakery
|
a22d9ec34ee0e3c91c7caf7d53cb2db730885e3f
|
[
"MIT"
] | null | null | null |
orders/models.py
|
faizalazman/Bakery
|
a22d9ec34ee0e3c91c7caf7d53cb2db730885e3f
|
[
"MIT"
] | null | null | null |
orders/models.py
|
faizalazman/Bakery
|
a22d9ec34ee0e3c91c7caf7d53cb2db730885e3f
|
[
"MIT"
] | null | null | null |
from django.db import models
from menu.models import Menu
class Order(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.EmailField()
address = models.CharField(max_length=250)
phone = models.CharField(max_length=20)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
paid = models.BooleanField(default=False)
order_notes = models.CharField(max_length=200)
braintree_id = models.CharField(max_length=150, blank=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return f'Order {self.id}'
def get_total_cost(self):
return sum(item.get_cost() for item in self.items.all())
class OrderItem(models.Model):
order = models.ForeignKey(Order,
related_name='items',
on_delete=models.CASCADE)
product = models.ForeignKey(Menu,
related_name='order_items',
on_delete=models.CASCADE)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return str(self.id)
def get_cost(self):
return self.price * self.quantity
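For illustration, a sketch of how the cost helpers compose, e.g. from a Django shell; the objects and prices are made up, and a configured database with at least one Menu row is assumed.

from decimal import Decimal

order = Order.objects.create(
    first_name="Jane", last_name="Doe", email="jane@example.com",
    address="1 Main St", phone="555-0100", order_notes="",
)
item = OrderItem.objects.create(
    order=order,
    product=Menu.objects.first(),  # assumes a Menu item already exists
    price=Decimal("3.50"),
    quantity=4,
)
print(item.get_cost())         # Decimal('14.00')
print(order.get_total_cost())  # Decimal('14.00') -- summed over order.items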
| 32.095238
| 64
| 0.649852
|
e78e432c494a2039e0a6691fab3f07475d185c7c
| 1,608
|
py
|
Python
|
envy/lib/file_downloader.py
|
magmastonealex/fydp
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | 6
|
2019-06-26T02:32:12.000Z
|
2020-03-01T23:08:37.000Z
|
envy/lib/file_downloader.py
|
magmastonealex/fydp
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | 18
|
2019-06-26T04:08:33.000Z
|
2021-06-01T23:53:08.000Z
|
envy/lib/file_downloader.py
|
envy-project/envy
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | null | null | null |
import requests
from envy.lib.config.file import find_config_file
class ConfigExecFile:
def __init__(self, filename, byt):
self.filename = filename
self.bytes = byt
class FileDownloadError(Exception):
def __init__(self, requests_error):
super(FileDownloadError, self).__init__()
self.requests_error = requests_error
def resolve_files(file_objects):
""" Turn file objects from the config into "real" objects with Byte strings.
Support URL and path formats
Args:
file_objects (list<dict>): fileObjects from the config
Returns:
list<ConfigExecFile>: List of executable files to run in the image
Raises:
FileDownloadError: When a file fails to download for some reason. Contains the Requests error.
"""
if not file_objects:
return None
project_root = find_config_file().parent
returned_list = []
for obj in file_objects:
try:
if "url" in obj:
r = requests.get(obj["url"])
returned_list.append(ConfigExecFile(obj["filename"], r.content))
elif "path" in obj:
file_path = "{}/{}".format(project_root, obj["path"])
try:
fil = open(filePath, "rb")
returned_list.append(ConfigExecFile(obj["filename"], fil.read()))
except:
raise Exception("Failed opening file at " + file_path)
except requests.exceptions.RequestException as e:
raise FileDownloadError(e)
return returned_list
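A sketch of the file-object shapes resolve_files() accepts; the filenames, URL and path are placeholders, and the call itself is left commented out because it needs a reachable URL and a project config file on disk.

example_file_objects = [
    {"filename": "install.sh", "url": "https://example.com/install.sh"},  # fetched via requests
    {"filename": "setup.py", "path": "scripts/setup.py"},                 # read relative to the config directory
]
# files = resolve_files(example_file_objects)  # -> list of ConfigExecFile, or raises FileDownloadError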
| 34.956522
| 106
| 0.616294
|
6cdd3d7b38b29b49bf06bb9777a11f26053323e7
| 375
|
py
|
Python
|
release/release-control-shell/service/java_service_action.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 39
|
2021-10-12T01:43:27.000Z
|
2022-03-28T04:46:35.000Z
|
release/release-control-shell/service/java_service_action.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 6
|
2021-10-14T02:11:47.000Z
|
2022-03-23T02:41:50.000Z
|
release/release-control-shell/service/java_service_action.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 10
|
2021-10-14T09:36:03.000Z
|
2022-02-10T11:05:12.000Z
|
import os.path
from service.service_action import BaseServiceAction
class JavaServiceAction(BaseServiceAction):
def run(self):
download_path = self.download(self.config.service + ".jar")
target = os.path.join(
self.wefe_dir,
self.config.service + ".jar"
)
self.replace_file(download_path, target)
pass
| 22.058824
| 67
| 0.642667
|
40bbd2f16972dfc87130960d0ab83e9cf6534fb5
| 627
|
py
|
Python
|
plotly/validators/scatter/marker/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 2
|
2018-12-03T15:20:42.000Z
|
2018-12-03T15:20:47.000Z
|
plotly/validators/scatter/marker/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scatter/marker/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 2
|
2019-06-17T01:35:57.000Z
|
2020-11-03T01:07:19.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='color', parent_name='scatter.marker', **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
colorscale_path=kwargs.pop(
'colorscale_path', 'scatter.marker.colorscale'
),
**kwargs
)
| 31.35
| 73
| 0.606061
|
5f8d6bf088e739cdac2f5fc3d26c1e965648819c
| 10,842
|
py
|
Python
|
neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py
|
JackyGao2016/OpenStack-ML2
|
fc0569e72c7cfdbf9464c95572b242bfdd4cd79c
|
[
"Apache-2.0"
] | 1
|
2021-02-19T05:54:04.000Z
|
2021-02-19T05:54:04.000Z
|
neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py
|
JackyGao2016/OpenStack-ML2
|
fc0569e72c7cfdbf9464c95572b242bfdd4cd79c
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py
|
JackyGao2016/OpenStack-ML2
|
fc0569e72c7cfdbf9464c95572b242bfdd4cd79c
|
[
"Apache-2.0"
] | 2
|
2016-11-29T11:22:58.000Z
|
2016-11-29T11:54:41.000Z
|
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent import l3_ha_agent
from neutron.agent.linux import interface
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.openstack.common import uuidutils
from neutron.services.firewall.agents.varmour import varmour_router
from neutron.services.firewall.agents.varmour import varmour_utils
from neutron.services.firewall.drivers.varmour import varmour_fwaas
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_DIRECTOR = '1.1.1.1'
class TestBasicRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRouterOperations, self).setUp()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS)
self.conf.register_opts(l3_ha_agent.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.root_helper = 'sudo'
self.conf.state_path = ''
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.makedirs_p = mock.patch('os.makedirs')
self.makedirs = self.makedirs_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
mock.patch('neutron.agent.l3_agent.L3PluginApi').start()
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
def _create_router(self):
router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf)
router.rest.server = FAKE_DIRECTOR
router.rest.user = 'varmour'
router.rest.passwd = 'varmour'
return router
def _create_fwaas(self):
fwaas = varmour_fwaas.vArmourFwaasDriver()
fwaas.rest.server = FAKE_DIRECTOR
fwaas.rest.user = 'varmour'
fwaas.rest.passwd = 'varmour'
return fwaas
def _del_all_internal_ports(self, router):
router[l3_constants.INTERFACE_KEY] = []
def _del_internal_ports(self, router, port_idx):
del router[l3_constants.INTERFACE_KEY][port_idx]
def _add_internal_ports(self, router, port_count=1):
self._del_all_internal_ports(router)
for i in range(port_count):
port = {'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '10.0.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '10.0.%s.0/24' % i,
'gateway_ip': '10.0.%s.1' % i}}
router[l3_constants.INTERFACE_KEY].append(port)
def _del_all_floating_ips(self, router):
router[l3_constants.FLOATINGIP_KEY] = []
def _del_floating_ips(self, router, port_idx):
del router[l3_constants.FLOATINGIP_KEY][port_idx]
def _add_floating_ips(self, router, port_count=1):
self._del_all_floating_ips(router)
for i in range(port_count):
fip = {'id': _uuid(),
'port_id': router['gw_port']['id'],
'floating_ip_address': '172.24.4.%s' % (100 + i),
'fixed_ip_address': '10.0.0.%s' % (100 + i)}
router[l3_constants.FLOATINGIP_KEY].append(fip)
def _prepare_router_data(self, enable_snat=None):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '172.24.4.2',
'subnet_id': _uuid()}],
'subnet': {'cidr': '172.24.4.0/24',
'gateway_ip': '172.24.4.1'},
'ip_cidr': '172.24.4.226/28'}
int_ports = []
router = {
'id': router_id,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port}
if enable_snat is not None:
router['enable_snat'] = enable_snat
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
return ri
def _add_firewall_rules(self, fw, rule_count=1):
rules = []
for i in range(rule_count):
rule = {'id': _uuid(),
'enabled': True,
'action': 'deny' if (i % 2 == 0) else 'allow',
'ip_version': 4,
'protocol': 'tcp',
'source_ip_address': '10.0.0.%s/24' % (100 + i),
'destination_port': '%s' % (100 + i)}
rules.append(rule)
fw['firewall_rule_list'] = rules
def _prepare_firewall_data(self):
fw = {'id': _uuid(),
'admin_state_up': True,
'firewall_rule_list': []}
return fw
def test_firewall_without_rule(self):
router = self._create_router()
fwaas = self._create_fwaas()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
router._router_added(ri.router['id'], ri.router)
rl = [ri]
fw = self._prepare_firewall_data()
fwaas.create_firewall(rl, fw)
url = varmour_utils.REST_URL_CONF_POLICY
prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
n = fwaas.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0)
fwaas.delete_firewall(rl, fw)
n = fwaas.rest.count_cfg_objs(url, prefix)
self.assertEqual(n, 0)
router._router_removed(ri.router['id'])
def test_firewall_with_rules(self):
router = self._create_router()
fwaas = self._create_fwaas()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
router._router_added(ri.router['id'], ri.router)
rl = [ri]
fw = self._prepare_firewall_data()
self._add_firewall_rules(fw, 2)
fwaas.create_firewall(rl, fw)
prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
pol_url = varmour_utils.REST_URL_CONF_POLICY
serv_url = varmour_utils.REST_URL_CONF_SERVICE
addr_url = varmour_utils.REST_URL_CONF_ADDR
# 3x number of policies
n = fwaas.rest.count_cfg_objs(pol_url, prefix)
self.assertEqual(n, 6)
n = fwaas.rest.count_cfg_objs(addr_url, prefix)
self.assertEqual(n, 2)
n = fwaas.rest.count_cfg_objs(serv_url, prefix)
self.assertEqual(n, 2)
fwaas.delete_firewall(rl, fw)
n = fwaas.rest.count_cfg_objs(pol_url, prefix)
self.assertEqual(n, 0)
router._router_removed(ri.router['id'])
def test_firewall_add_remove_rules(self):
router = self._create_router()
fwaas = self._create_fwaas()
try:
router.rest.auth()
except Exception:
# skip the test, firewall is not deployed
return
ri = self._prepare_router_data(enable_snat=True)
self._add_internal_ports(ri.router, port_count=1)
self._add_floating_ips(ri.router, port_count=1)
router._router_added(ri.router['id'], ri.router)
rl = [ri]
fw = self._prepare_firewall_data()
self._add_firewall_rules(fw, 2)
fwaas.create_firewall(rl, fw)
prefix = varmour_utils.get_firewall_object_prefix(ri, fw)
pol_url = varmour_utils.REST_URL_CONF_POLICY
serv_url = varmour_utils.REST_URL_CONF_SERVICE
addr_url = varmour_utils.REST_URL_CONF_ADDR
# 3x number of policies
n = fwaas.rest.count_cfg_objs(pol_url, prefix)
self.assertEqual(n, 6)
n = fwaas.rest.count_cfg_objs(addr_url, prefix)
self.assertEqual(n, 2)
n = fwaas.rest.count_cfg_objs(serv_url, prefix)
self.assertEqual(n, 2)
self._add_firewall_rules(fw, 1)
fwaas.create_firewall(rl, fw)
n = fwaas.rest.count_cfg_objs(pol_url, prefix)
self.assertEqual(n, 3)
n = fwaas.rest.count_cfg_objs(addr_url, prefix)
self.assertEqual(n, 1)
n = fwaas.rest.count_cfg_objs(serv_url, prefix)
self.assertEqual(n, 1)
fwaas.delete_firewall(rl, fw)
n = fwaas.rest.count_cfg_objs(pol_url, prefix)
self.assertEqual(n, 0)
router._router_removed(ri.router['id'])
| 37.003413
| 79
| 0.625254
|
c89b5952aee6d0a08245a3b5d49b3c5ec3bd5098
| 1,485
|
py
|
Python
|
src/models/model_monitor.py
|
genbid007-ml/churn_model
|
cfd91bafd5089b4a29008305702e66d8f70d67b6
|
[
"MIT"
] | null | null | null |
src/models/model_monitor.py
|
genbid007-ml/churn_model
|
cfd91bafd5089b4a29008305702e66d8f70d67b6
|
[
"MIT"
] | null | null | null |
src/models/model_monitor.py
|
genbid007-ml/churn_model
|
cfd91bafd5089b4a29008305702e66d8f70d67b6
|
[
"MIT"
] | null | null | null |
import yaml
import argparse
import pandas as pd
from evidently.dashboard import Dashboard
from evidently.tabs import DataDriftTab, CatTargetDriftTab
def read_params(config_path):
"""
read parameters from the params.yaml file
input: params.yaml location
output: parameters as dictionary
"""
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
def model_monitoring(config_path):
config = read_params(config_path)
train_data_path = config["raw_data_config"]["raw_data_csv"]
new_train_data_path = config["raw_data_config"]["new_train_data_csv"]
target = config["raw_data_config"]["target"]
monitor_dashboard_path = config["model_monitor"]["monitor_dashboard_html"]
monitor_target = config["model_monitor"]["target_col_name"]
ref = pd.read_csv(train_data_path)
cur = pd.read_csv(new_train_data_path)
ref = ref.rename(columns={target: monitor_target}, inplace=False)
cur = cur.rename(columns={target: monitor_target}, inplace=False)
data_and_target_drift_dashboard = Dashboard(tabs=[DataDriftTab, CatTargetDriftTab])
data_and_target_drift_dashboard.calculate(ref, cur, column_mapping=None)
data_and_target_drift_dashboard.save(monitor_dashboard_path)
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
model_monitoring(config_path=parsed_args.config)
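# A minimal sketch of the structure read_params is expected to return, matching the
# keys accessed above; the paths and column names are placeholders, not the
# project's real values.
#
#     {
#         "raw_data_config": {
#             "raw_data_csv": "data/raw/train.csv",
#             "new_train_data_csv": "data/raw/new_train.csv",
#             "target": "Churn",
#         },
#         "model_monitor": {
#             "monitor_dashboard_html": "reports/data_drift_dashboard.html",
#             "target_col_name": "target",
#         },
#     }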
| 35.357143
| 87
| 0.758249
|
8e60971ac06c9e3758e1e063442de81cb6c976d7
| 425
|
py
|
Python
|
backend/api/admin.py
|
sperrys/YEF_DEBUG
|
8e09342ef9639c4f6abe1390f37dddb8e398784c
|
[
"MIT"
] | 2
|
2018-12-10T03:14:31.000Z
|
2019-03-27T16:20:36.000Z
|
backend/api/admin.py
|
sperrys/YEF_DEBUG
|
8e09342ef9639c4f6abe1390f37dddb8e398784c
|
[
"MIT"
] | 22
|
2018-12-06T23:54:20.000Z
|
2019-04-17T18:15:43.000Z
|
backend/api/admin.py
|
sperrys/YEF_DEBUG
|
8e09342ef9639c4f6abe1390f37dddb8e398784c
|
[
"MIT"
] | 1
|
2020-11-03T05:27:10.000Z
|
2020-11-03T05:27:10.000Z
|
from django.contrib import admin
from api.models import Team, Tournament, Round, MemberPoint, JudgePoint, MatchUp, Judge, Member, Club
# Register your models here.
admin.site.register(Tournament)
admin.site.register(Team)
admin.site.register(Round)
admin.site.register(MemberPoint)
admin.site.register(JudgePoint)
admin.site.register(MatchUp)
admin.site.register(Judge)
admin.site.register(Member)
admin.site.register(Club)
| 30.357143
| 101
| 0.814118
|
33247b217b290f05dcb38bc24b83683bd93e64fa
| 2,044
|
py
|
Python
|
session/pyrogram.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
session/pyrogram.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
session/pyrogram.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | 1
|
2021-08-30T12:28:55.000Z
|
2021-08-30T12:28:55.000Z
|
import logging
import os
import time
import motor.motor_asyncio
from pyrogram import Client
from .config_var import Config
# Note StartUp Time - To Capture Uptime.
start_time = time.time()
speedo_version = "0.1"
# Enable Logging For Pyrogram
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - [Speedo] - %(levelname)s - %(message)s",
)
logging.getLogger("pyrogram").setLevel(logging.ERROR)
logging.getLogger("apscheduler").setLevel(logging.ERROR)
mongo_client = motor.motor_asyncio.AsyncIOMotorClient(Config.MONGO_DB)
CMD_LIST = {}
XTRA_CMD_LIST = {}
sudo_id = Config.AFS
if not Config.STRINGSESSION:
logging.error("No String Session Found! Speedo is Exiting!")
quit(1)
if not Config.API_ID:
logging.error("No Api-ID Found! Speedo is Exiting!")
quit(1)
if not Config.API_HASH:
logging.error("No ApiHash Found! Speedo is Exiting!")
quit(1)
if not Config.LOG_GRP:
logging.error("No Log Group ID Found! Speedo is Exiting!")
quit(1)
# Clients - up to 4 clients are supported!
if Config.STRINGSESSION:
Speedo = Client(
Config.STRINGSESSION,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
sleep_threshold=180,
)
if Config.STRINGSESSION_2:
Speedo2 = Client(
Config.STRINGSESSION_2,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
sleep_threshold=180,
)
else:
Speedo2 = None
if Config.STRINGSESSION_3:
Speedo3 = Client(
Config.STRINGSESSION_3,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
sleep_threshold=180,
)
else:
Speedo3 = None
if Config.STRINGSESSION_4:
Speedo4 = Client(
Config.STRINGSESSION_4,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
sleep_threshold=180,
)
else:
Speedo4 = None
if Config.BOT_TOKEN:
bot = Client(
"MyAssistant",
api_id=Config.API_ID,
api_hash=Config.API_HASH,
bot_token=Config.BOT_TOKEN,
sleep_threshold=180,
)
else:
bot = None
| 22.217391
| 70
| 0.677104
|
f174cb1ff031740b0968a47b00108504fb91af85
| 2,806
|
py
|
Python
|
sle_gan/data.py
|
kitbogashow/SLE-GAN
|
6a7a6398d5124720315b92105e3a813934bb07bd
|
[
"MIT"
] | 63
|
2020-11-22T13:32:23.000Z
|
2022-03-31T07:50:53.000Z
|
sle_gan/data.py
|
kitbogashow/SLE-GAN
|
6a7a6398d5124720315b92105e3a813934bb07bd
|
[
"MIT"
] | 5
|
2020-11-27T06:26:29.000Z
|
2022-01-14T04:42:53.000Z
|
sle_gan/data.py
|
kitbogashow/SLE-GAN
|
6a7a6398d5124720315b92105e3a813934bb07bd
|
[
"MIT"
] | 11
|
2020-12-03T11:34:43.000Z
|
2022-02-22T02:14:19.000Z
|
from functools import partial
import tensorflow as tf
def create_input_noise(batch_size: int):
return tf.random.normal(shape=(batch_size, 1, 1, 256), mean=0.0, stddev=1.0, dtype=tf.float32)
def read_image_from_path(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image)
return image
def preprocess_images(images, resolution: int):
"""
    Resize and normalize the images to the range [-1, 1]
    Args:
        images: batch of images (B, H, W, C)
        resolution: target size (height and width) for resizing
Returns:
resized and normalized images
"""
images = tf.image.resize(images, (resolution, resolution))
images = tf.cast(images, tf.float32) - 127.5
images = images / 127.5
return images
def postprocess_images(images, dtype=tf.float32):
"""
De-Normalize the images to the range [0, 255]
Args:
images: batch of normalized images
dtype: target dtype
Returns:
de-normalized images
"""
images = (images * 127.5) + 127.5
images = tf.cast(images, dtype)
return images
def create_dataset(batch_size: int,
folder: str,
resolution: int,
use_flip_augmentation: bool = True,
image_extension: str = "jpg",
shuffle_buffer_size: int = 100):
dataset = tf.data.Dataset.list_files(folder + f"/*.{image_extension}")
dataset = dataset.map(read_image_from_path)
if use_flip_augmentation:
dataset = dataset.map(tf.image.flip_left_right)
dataset = dataset.map(partial(preprocess_images, resolution=resolution))
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def center_crop_images(images, crop_resolution: int):
"""
Crops the center of the images
Args:
images: shape: (B, H, W, 3), H should be equal to W
crop_resolution: target resolution for the crop
Returns:
cropped images which has the shape: (B, crop_resolution, crop_resolution, 3)
"""
crop_resolution = tf.cast(crop_resolution, tf.float32)
half_of_crop_resolution = crop_resolution / 2
image_height = tf.cast(tf.shape(images)[1], tf.float32)
image_center = image_height / 2
from_ = int(image_center - half_of_crop_resolution)
to_ = int(image_center + half_of_crop_resolution)
return images[:, from_:to_, from_:to_, :]
def get_test_images(batch_size: int, folder: str, resolution: int):
dataset = create_dataset(batch_size, str(folder), resolution=resolution, use_flip_augmentation=False,
shuffle_buffer_size=1)
for x in dataset.take(1):
return x
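# A small round-trip sanity sketch for the normalization helpers above: values are
# mapped to [-1, 1] by preprocess_images and back to [0, 255] by postprocess_images.
# The 4x4 batch below is an arbitrary illustration.
if __name__ == "__main__":
    batch = tf.random.uniform((2, 4, 4, 3), minval=0, maxval=255)
    normalized = preprocess_images(batch, resolution=4)     # roughly in [-1, 1]
    restored = postprocess_images(normalized)               # back to [0, 255]
    print(tf.reduce_max(tf.abs(restored - batch)).numpy())  # near-zero round-trip error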
| 31.177778
| 121
| 0.643977
|
67472a179edc48f5c65f30ee14c6f330e7f5fd0d
| 2,138
|
py
|
Python
|
fractal/contrib/gcp/firestore/specifications.py
|
douwevandermeij/fractal
|
66b04892b4d6fd8ee6a0c07b6e230f4321165085
|
[
"MIT"
] | 2
|
2021-08-12T05:19:08.000Z
|
2022-01-29T16:22:37.000Z
|
fractal/contrib/gcp/firestore/specifications.py
|
douwevandermeij/fractal
|
66b04892b4d6fd8ee6a0c07b6e230f4321165085
|
[
"MIT"
] | null | null | null |
fractal/contrib/gcp/firestore/specifications.py
|
douwevandermeij/fractal
|
66b04892b4d6fd8ee6a0c07b6e230f4321165085
|
[
"MIT"
] | null | null | null |
from typing import Collection, Optional
from fractal.core.exceptions import DomainException
from fractal.core.specifications.generic.collections import AndSpecification
from fractal.core.specifications.generic.operators import (
EqualsSpecification,
GreaterThanEqualSpecification,
GreaterThanSpecification,
InSpecification,
LessThanEqualSpecification,
LessThanSpecification,
)
from fractal.core.specifications.generic.specification import Specification
class SpecificationNotMappedToFirestore(DomainException):
code = "SPECIFICATION_NOT_MAPPED_TO_FIRESTORE"
status_code = 500
class FirestoreSpecificationBuilder:
@staticmethod
def build(specification: Specification = None) -> Optional[Collection]:
if specification is None:
return None
elif isinstance(specification, AndSpecification):
return [
FirestoreSpecificationBuilder.build(spec)
for spec in specification.to_collection()
]
elif isinstance(specification, InSpecification):
return specification.field, "in", specification.value
elif isinstance(specification, EqualsSpecification):
return specification.field, "==", specification.value
elif isinstance(specification, LessThanSpecification):
return specification.field, "<", specification.value
elif isinstance(specification, LessThanEqualSpecification):
return specification.field, "<=", specification.value
elif isinstance(specification, GreaterThanSpecification):
return specification.field, ">", specification.value
elif isinstance(specification, GreaterThanEqualSpecification):
return specification.field, ">=", specification.value
elif isinstance(specification.to_collection(), dict):
return [
(key, "==", value)
for key, value in dict(specification.to_collection()).items()
]
raise SpecificationNotMappedToFirestore(
f"Specification '{specification}' not mapped to Firestore query."
)
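# A few illustrative mappings produced by FirestoreSpecificationBuilder.build; the
# field names and values are placeholders, and the specification constructors are
# assumed to take (field, value), as suggested by the attribute access above.
#
#     build(EqualsSpecification("status", "open"))      -> ("status", "==", "open")
#     build(GreaterThanSpecification("amount", 10))     -> ("amount", ">", 10)
#     build(InSpecification("status", ["open", "new"])) -> ("status", "in", ["open", "new"])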
| 41.921569
| 77
| 0.706735
|
58383f77c851ea9de5dcf86f1a4fb7ee54fc714f
| 12,862
|
py
|
Python
|
django/sierra/export/tasks.py
|
Miamiohlibs/catalog-api
|
dee81a808bbe55e4c8325945ebb559edba593819
|
[
"BSD-3-Clause"
] | null | null | null |
django/sierra/export/tasks.py
|
Miamiohlibs/catalog-api
|
dee81a808bbe55e4c8325945ebb559edba593819
|
[
"BSD-3-Clause"
] | null | null | null |
django/sierra/export/tasks.py
|
Miamiohlibs/catalog-api
|
dee81a808bbe55e4c8325945ebb559edba593819
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import logging
import sys, traceback
import pysolr
from django.core import mail
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone as tz
from django.db import connections
from celery import Task, shared_task, group, chain
from . import exporter
from . import models as export_models
from .operror import OperationalError
# set up logger, for debugging
logger = logging.getLogger('sierra.custom')
class DispatchErrorTask(Task):
'''
Subclasses celery.Task to provide custom on_failure error handling.
This is for the export_dispatch task. It's needed because other
tasks have a different number of arguments. (For future: change
args to kwargs so we can have one Error Task class.)
'''
def on_failure(self, exc, task_id, args, kwargs, einfo):
message = 'Task {} failed: {}.'.format(task_id, exc)
log_task_error(*[i for i in args[:4]], message=message)
class ErrorTask(Task):
'''
Subclasses celery.Task to provide custom on_failure error handling.
'''
def on_failure(self, exc, task_id, args, kwargs, einfo):
message = 'Task {} failed: {}.'.format(task_id, exc)
log_task_error(*[i for i in args[1:5]], message=message)
@shared_task
def optimize():
'''
Celery task that simply runs "optimize" on all Solr indexes
'''
logger = logging.getLogger('exporter.file')
logger.info('Running optimization on all Solr indexes.')
url_stack = []
for index, options in settings.HAYSTACK_CONNECTIONS.iteritems():
if options['URL'] not in url_stack:
conn = pysolr.Solr(options['URL'],
timeout=options['TIMEOUT'])
logger.info('Optimizing {} index.'.format(index))
conn.optimize()
url_stack.append(options['URL'])
logger.info('Done.')
def trigger_export(instance, export_filter, export_type, options):
'''
Non-task wrapper function for our task chain. Call this from the
view so that we can keep the implementation details of tasks
separate from the view logic.
'''
connections['default'].close()
args = (instance.pk, export_filter, export_type, options)
try:
et = export_models.ExportType.objects.get(pk=export_type)
except OperationalError:
et = export_models.ExportType.objects.get(pk=export_type)
exporter_class = et.get_exporter_class()
exp = exporter_class(*args)
exp.status = 'waiting'
exp.save_status()
exp.log('Info', 'Export {} task triggered. Waiting on task to be '
'scheduled.'.format(instance.pk))
export_dispatch.apply_async(args,
link_error=do_final_cleanup.s(*args, status='errors')
)
@shared_task(base=DispatchErrorTask)
def export_dispatch(instance_pk, export_filter, export_type, options):
'''
Control function for doing an export job.
'''
# The below checks to see if this was a job triggered by Celery's
# automatic scheduler, in which case the instance_pk is (should be)
# -1. If this is the case, it generates a new export instance
    # object using the username in the EXPORTER_AUTOMATED_USERNAME
# setting as the user. Default is django_admin.
connections['default'].close()
if instance_pk == -1:
user = User.objects.get(username=settings.EXPORTER_AUTOMATED_USERNAME)
instance = export_models.ExportInstance(
export_filter_id=export_filter,
export_type_id=export_type,
timestamp=tz.now(),
user=user,
status_id='in_progress'
)
try:
instance.save()
except OperationalError:
instance.save()
instance_pk = instance.pk
args = [instance_pk, export_filter, export_type, options]
try:
et = export_models.ExportType.objects.get(pk=export_type)
except OperationalError:
et = export_models.ExportType.objects.get(pk=export_type)
exporter_class = et.get_exporter_class()
try:
exp = exporter_class(*args, log_label=settings.TASK_LOG_LABEL)
except OperationalError:
exp = exporter_class(*args, log_label=settings.TASK_LOG_LABEL)
exp.log('Info', 'Job received.')
exp.status = 'in_progress'
exp.save_status()
exp.log('Info', '-------------------------------------------------------')
exp.log('Info', 'EXPORTER {} -- {}'.format(exp.instance.pk, et.code))
exp.log('Info', '-------------------------------------------------------')
exp.log('Info', 'MAX RECORD CHUNK size is {}.'.format(exp.max_rec_chunk))
exp.log('Info', 'MAX DELETION CHUNK size is {}.'.format(exp.max_del_chunk))
try:
records = exp.get_records()
deletions = exp.get_deletions()
except exporter.ExportError as err:
exp.log('Error', err)
exp.status = 'errors'
exp.save_status()
else:
# Get records and deletions counts. If it's a queryset we want
# to use count(), otherwise we have to use len(). Lists have a
# count() method, but it's not the same as queryset.count().
# Lists throw a TypeError if you call count() without an arg.
try:
records_count = records.count()
except (TypeError, AttributeError):
records_count = len(records)
try:
deletions_count = deletions.count()
except (TypeError, AttributeError):
try:
deletions_count = len(deletions)
except Exception:
deletions_count = 0
count = {'record': records_count, 'deletion': deletions_count}
exp.log('Info', '{} records found.'.format(count['record']))
if deletions is not None:
exp.log('Info', '{} candidates found for deletion.'
''.format(count['deletion']))
do_it_tasks = []
for type in count:
max = exp.max_rec_chunk if type == 'record' else exp.max_del_chunk
task_args = []
batches = 0
for start in range(0, count[type], max):
end = min(start + max, count[type])
i_args = [{}] + args if start == 0 or exp.parallel else args
do_it_tasks.append(
do_export_chunk.s(*i_args, start=start, end=end, type=type)
)
batches += 1
if batches > 0:
exp.log('Info', 'Breaking {}s into {} chunk{}.'
''.format(type, batches, 's' if batches > 1 else ''))
if do_it_tasks:
if exp.parallel:
final_grouping = chain(group(do_it_tasks),
do_final_cleanup.s(*args))
else:
final_grouping = chain(chain(do_it_tasks),
do_final_cleanup.s(*args))
final_grouping.apply_async(
link_error=do_final_cleanup.s(*args, status='errors')
)
else:
args = [{}] + args
do_final_cleanup.s(*args).apply_async()
@shared_task(base=ErrorTask)
def do_export_chunk(vals, instance_pk, export_filter, export_type, options,
start, end, type):
'''
Processes a "chunk" of Exporter records, depending on type
("record" if it's a record load or "deletion" if it's a deletion).
Variable vals should be a dictionary of arbitrary values used to
pass information from task to task.
'''
connections['default'].close()
try:
et = export_models.ExportType.objects.get(pk=export_type)
except OperationalError:
et = export_models.ExportType.objects.get(pk=export_type)
exporter_class = et.get_exporter_class()
try:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
except OperationalError:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
records = exp.get_records() if type == 'record' else exp.get_deletions()
# This is sort of a hack. My strategy initially was to use queryset
# slicing to get chunks we need, but apparently this doesn't work
# with prefetch_related--that is, it prefetches data for the ENTIRE
# friggin queryset despite the slice and makes us run out of memory
# on large jobs. Instead of slicing we actually have to use a
# filter before it correctly limits the prefetch. So we slice up
# PKs instead and use that as a basis for a filter.
try:
pks = list(records.prefetch_related(None).order_by('pk')[start:end+1])
pk_a = pks[0].pk
pk_n = pks[-1].pk
if type == 'record':
records = exp.get_records()
else:
records = exp.get_deletions()
records = records.order_by('pk').filter(pk__gte=pk_a, pk__lte=pk_n)
except AttributeError:
if records is not None:
records = records[start:end+1]
job_id = '{}s {} - {}'.format(type, start+1, end)
exp.log('Info', 'Starting processing {}.'.format(job_id))
try:
if type == 'record' and records is not None:
vals = exp.export_records(records, vals=vals)
elif records is not None:
vals = exp.delete_records(records, vals=vals)
except Exception as err:
ex_type, ex, tb = sys.exc_info()
logger.info(traceback.extract_tb(tb))
exp.log('Error', 'Error processing {}: {}.'.format(job_id, err))
else:
exp.log('Info', 'Finished processing {}.'.format(job_id))
return vals
@shared_task(base=ErrorTask)
def do_final_cleanup(vals, instance_pk, export_filter, export_type, options,
status='success'):
'''
Task that runs after all sub-tasks for an export job are done.
Does final clean-up steps, such as updating the ExportInstance
status, triggering the final callback function on the export job,
emailing site admins if there were errors, etc.
'''
connections['default'].close()
try:
et = export_models.ExportType.objects.get(pk=export_type)
except OperationalError:
et = export_models.ExportType.objects.get(pk=export_type)
exporter_class = et.get_exporter_class()
try:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
except OperationalError:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
exp.final_callback(vals, status)
errors = exp.instance.errors
warnings = exp.instance.warnings
if status == 'success':
if errors > 0:
status = 'done_with_errors'
exp.log('Info', 'Job finished.')
elif status == 'errors':
exp.log('Warning', 'Job terminated prematurely.')
exp.status = status
exp.save_status()
(send_errors, send_warnings) = (None, None)
if errors > 0 and settings.EXPORTER_EMAIL_ON_ERROR:
subject = '{} Exporter Errors'.format(
exp.instance.export_type.code)
send_errors = errors
if warnings > 0 and settings.EXPORTER_EMAIL_ON_WARNING:
subject = '{} Exporter Warnings'.format(
exp.instance.export_type.code)
send_warnings = warnings
if send_errors or send_warnings:
logfile = settings.LOGGING['handlers']['export_file']['filename']
vars = template.Context({
'i': exp.instance,
'errors': send_errors,
'warnings': send_warnings,
'logfile': logfile
})
if send_errors and send_warnings:
subject = '{} Exporter Errors and Warnings'.format(
exp.instance.export_type.code)
email = template.loader.get_template('export/error_email.txt')
mail.mail_admins(subject, email.render(vars))
def log_task_error(instance_pk, export_filter, export_type, options, message):
connections['default'].close()
try:
et = export_models.ExportType.objects.get(pk=export_type)
except OperationalError:
et = export_models.ExportType.objects.get(pk=export_type)
exporter_class = et.get_exporter_class()
try:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
except OperationalError:
exp = exporter_class(instance_pk, export_filter, export_type, options,
log_label=settings.TASK_LOG_LABEL)
exp.log('Error', message)
| 39.453988
| 79
| 0.625641
|
e24f22dab45888b99d720ae86257fa7c2874d468
| 13,541
|
py
|
Python
|
robot/OutboundFundsNPSP/resources/OutboundFundsNPSP.py
|
SalesforceFoundation/OutboundFundsNPSP
|
99072a88574d6daff4c186fa337eb99b7f341497
|
[
"BSD-3-Clause"
] | 2
|
2018-08-03T17:12:08.000Z
|
2020-05-23T21:18:40.000Z
|
robot/OutboundFundsNPSP/resources/OutboundFundsNPSP.py
|
SalesforceFoundation/OutboundFundsModuleNPSP
|
68809978337ff0cf68cc73923de7cbd2e4dc4929
|
[
"BSD-3-Clause"
] | 23
|
2021-03-29T19:41:15.000Z
|
2022-02-23T19:51:36.000Z
|
robot/OutboundFundsNPSP/resources/OutboundFundsNPSP.py
|
SFDO-Community/OutboundFundsNPSP
|
68809978337ff0cf68cc73923de7cbd2e4dc4929
|
[
"BSD-3-Clause"
] | 4
|
2018-11-21T19:45:54.000Z
|
2020-01-14T22:03:05.000Z
|
import logging
import random
import string
import warnings
from BaseObjects import BaseOutboundFundsNPSPPage
from robot.libraries.BuiltIn import RobotNotRunningError
from locators_52 import outboundfundsnpsp_lex_locators as locators_52
from locators_51 import outboundfundsnpsp_lex_locators as locators_51
from cumulusci.robotframework.utils import selenium_retry, capture_screenshot_on_error
locators_by_api_version = {
51.0: locators_51, # Spring '21
52.0: locators_52, # Summer '21
}
# will get populated in _init_locators
outboundfundsnpsp_lex_locators = {}
@selenium_retry
class OutboundFundsNPSP(BaseOutboundFundsNPSPPage):
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_VERSION = 1.0
def __init__(self, debug=False):
self.debug = debug
self.current_page = None
self._session_records = []
# Turn off info logging of all http requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
self._init_locators()
def _init_locators(self):
try:
client = self.cumulusci.tooling
response = client._call_salesforce(
"GET", "https://{}/services/data".format(client.sf_instance)
)
self.latest_api_version = float(response.json()[-1]["version"])
if self.latest_api_version not in locators_by_api_version:
warnings.warn(
"Could not find locator library for API %d"
% self.latest_api_version
)
self.latest_api_version = max(locators_by_api_version.keys())
except RobotNotRunningError:
# We aren't part of a running test, likely because we are
# generating keyword documentation. If that's the case, assume
# the latest supported version
self.latest_api_version = max(locators_by_api_version.keys())
locators = locators_by_api_version[self.latest_api_version]
outboundfundsnpsp_lex_locators.update(locators)
def get_outboundfundsnpsp_lex_locators(self, path, *args, **kwargs):
""" Returns a rendered locator string from the outboundfundsnpsp_lex_locators
dictionary. This can be useful if you want to use an element in
a different way than the built in keywords allow.
"""
locator = outboundfundsnpsp_lex_locators
for key in path.split("."):
locator = locator[key]
main_loc = locator.format(*args, **kwargs)
return main_loc
def get_namespace_prefix(self, name):
parts = name.split("__")
if parts[-1] == "c":
parts = parts[:-1]
if len(parts) > 1:
return parts[0] + "__"
else:
return ""
def get_outfundsnpsp_namespace_prefix(self):
if not hasattr(self.cumulusci, "_describe_result"):
self.cumulusci._describe_result = self.cumulusci.sf.describe()
objects = self.cumulusci._describe_result["sobjects"]
fundingprogram_object = [o for o in objects if o["label"] == "Funding Program"][
0
]
return self.get_namespace_prefix(fundingprogram_object["name"])
def get_outfundsnpspext_namespace_prefix(self):
if not hasattr(self.cumulusci, "_describe_result"):
self.cumulusci._describe_result = self.cumulusci.sf.describe()
objects = self.cumulusci._describe_result["sobjects"]
gauexp_object = [o for o in objects if o["label"] == "GAU Expenditure"][0]
return self.get_namespace_prefix(gauexp_object["name"])
def get_npsp_namespace_prefix(self):
if not hasattr(self.cumulusci, "_describe_result"):
self.cumulusci._describe_result = self.cumulusci.sf.describe()
objects = self.cumulusci._describe_result["sobjects"]
gau_object = [o for o in objects if o["label"] == "General Accounting Unit"][0]
return self.get_namespace_prefix(gau_object["name"])
def get_outboundfundsnpsp_locator(self, path, *args, **kwargs):
""" Returns a rendered locator string from the npsp_lex_locators
dictionary. This can be useful if you want to use an element in
a different way than the built in keywords allow.
"""
locator = outboundfundsnpsp_lex_locators
for key in path.split("."):
locator = locator[key]
main_loc = locator.format(*args, **kwargs)
return main_loc
def _check_if_element_exists(self, xpath):
"""Checks if the given xpath exists
this is only a helper function being called from other keywords
"""
elements = int(self.selenium.get_element_count(xpath))
return True if elements > 0 else False
def check_if_element_exists(self, xpath):
"""Checks if an element with given xpath exists"""
elements = self.selenium.get_element_count(xpath)
return True if elements > 0 else False
def new_random_string(self, len=5):
"""Generate a random string of fixed length """
return "".join(random.choice(string.ascii_lowercase) for _ in range(len))
def generate_new_string(self, prefix="Robot Test"):
"""Generates a random string with Robot Test added as prefix"""
return "{PREFIX} {RANDOM}".format(
PREFIX=prefix, RANDOM=self.new_random_string(len=5)
)
def random_email(self, prefix="robot_", suffix="example.com"):
"""
Return a random fake email address.
:param prefix: Some text to put in front of the randomized part of the username.
Defaults to "robot_"
:type prefix: str
:param suffix: The domain part of the email address.
Defaults to "example.com"
:type suffix: str
:returns: The fake email address.
:rtype: str
"""
return "{PREFIX}{RANDOM}@{SUFFIX}".format(
PREFIX=prefix, RANDOM=self.new_random_string(len=5), SUFFIX=suffix
)
@capture_screenshot_on_error
def click_link_with_text(self, text):
"""Click on link with passed text"""
locator = outboundfundsnpsp_lex_locators["link"].format(text)
self.selenium.wait_until_page_contains_element(locator)
element = self.selenium.driver.find_element_by_xpath(locator)
self.selenium.driver.execute_script("arguments[0].click()", element)
@capture_screenshot_on_error
def click_save(self):
"""Click Save button in modal's footer"""
locator = outboundfundsnpsp_lex_locators["new_record"]["footer_button"].format(
"Save"
)
self.selenium.scroll_element_into_view(locator)
self.salesforce._jsclick(locator)
self.salesforce.wait_until_loading_is_complete()
@capture_screenshot_on_error
def validate_field_value(self, field, status, value, section=None):
"""If status is 'contains' then the specified value should be present in the field
'does not contain' then the specified value should not be present in the field
"""
if section is not None:
section = "text:" + section
self.selenium.scroll_element_into_view(section)
list_found = False
locators = outboundfundsnpsp_lex_locators["confirm"].values()
if status == "contains":
for i in locators:
print("inside for loop")
locator = i.format(field, value)
print(locator)
if self.check_if_element_exists(locator):
print(f"element exists {locator}")
actual_value = self.selenium.get_webelement(locator).text
print(f"actual value is {actual_value}")
assert (
value == actual_value
), "Expected {} value to be {} but found {}".format(
field, value, actual_value
)
list_found = True
break
if status == "does not contain":
for i in locators:
locator = i.format(field, value)
if self.check_if_element_exists(locator):
print(f"locator is {locator}")
raise Exception(f"{field} should not contain value {value}")
list_found = True
assert list_found, "locator not found"
@capture_screenshot_on_error
def click_tab(self, label):
"""Click on a tab on a record page"""
locator = outboundfundsnpsp_lex_locators["tab"]["tab_header"].format(label)
self.selenium.wait_until_element_is_enabled(
locator, error="Tab button is not available"
)
element = self.selenium.driver.find_element_by_xpath(locator)
self.selenium.driver.execute_script("arguments[0].click()", element)
def click_related_list_link_with_text(self, text):
"""Click on link with passed text in a related list table"""
locator = outboundfundsnpsp_lex_locators["related"]["flexi_link"].format(text)
self.selenium.wait_until_page_contains_element(locator)
element = self.selenium.driver.find_element_by_xpath(locator)
self.selenium.driver.execute_script("arguments[0].click()", element)
def click_related_list_wrapper_button(self, heading, button_title):
""" loads the related list and clicks on the button on the list """
locator = outboundfundsnpsp_lex_locators["related"]["flexi_button"].format(
heading, button_title
)
self.salesforce._jsclick(locator)
self.salesforce.wait_until_loading_is_complete()
@capture_screenshot_on_error
def save_disbursement(self):
"""Click Save Disbursement"""
locator = outboundfundsnpsp_lex_locators["details"]["button"].format("Save")
self.selenium.set_focus_to_element(locator)
self.selenium.get_webelement(locator).click()
def verify_row_count(self, value):
"""verifies if actual row count matches with expected value"""
locator = outboundfundsnpsp_lex_locators["related"]["count"]
actual_value = self.selenium.get_webelements(locator)
count = len(actual_value)
assert int(value) == count, "Expected value to be {} but found {}".format(
value, count
)
@capture_screenshot_on_error
def select_value_from_picklist(self, dropdown, value):
"""Select given value in the dropdown field"""
locator = outboundfundsnpsp_lex_locators["new_record"]["dropdown_field"].format(
dropdown
)
self.selenium.get_webelement(locator).click()
popup_loc = outboundfundsnpsp_lex_locators["new_record"]["dropdown_popup"]
self.selenium.wait_until_page_contains_element(
popup_loc, error="Picklist dropdown did not open"
)
value_loc = outboundfundsnpsp_lex_locators["new_record"][
"dropdown_value"
].format(value)
self.salesforce._jsclick(value_loc)
@capture_screenshot_on_error
def add_date(self, title, date):
""" Clicks on the 'Date' field in Form and picks a date in the argument """
locator = outboundfundsnpsp_lex_locators["new_record"]["date_field"].format(
title
)
self.selenium.set_focus_to_element(locator)
self.selenium.clear_element_text(locator)
self.selenium.get_webelement(locator).send_keys(date)
@capture_screenshot_on_error
def page_should_not_contain_locator(self, path, *args, **kwargs):
"""Waits for the locator specified to be not present on the page"""
main_loc = self.get_outboundfundsnpsp_lex_locators(path, *args, **kwargs)
self.selenium.wait_until_page_does_not_contain_element(main_loc, timeout=60)
def verify_button_status(self, **kwargs):
""" Verify the button is disabled/enabled, pass the name of the buttin
and the expected status of the buttin as either enabled or disabled"""
for key, value in kwargs.items():
locator = outboundfundsnpsp_lex_locators["button-with-text"].format(key)
self.selenium.wait_until_element_is_visible(
locator, error=f"'{key}' is not displayed on the page"
)
if value == "disabled":
actual_value = self.selenium.get_webelement(locator).get_attribute(
value
)
if actual_value is None or actual_value is False:
raise Exception(
f"Expected {key} status to be {value} but found {actual_value}"
)
elif value == "enabled":
actual_value = self.selenium.get_webelement(locator).get_attribute(
"disabled"
)
if not (actual_value is None or actual_value is False):
raise Exception(
f"Expected {key} status to be {value} but found {actual_value}"
)
def populate_field_with_id(self, id, value):
"""Populate field with id on manage expenditure page"""
locator = outboundfundsnpsp_lex_locators["id"].format(id)
if value == "null":
field = self.selenium.get_webelement(locator)
self.salesforce._clear(field)
else:
self.salesforce._populate_field(locator, value)
| 43.822006
| 90
| 0.642198
|
0a7b04a588c74e452af273b89e40dc2edfacdb94
| 1,170
|
py
|
Python
|
Python/Examples/Scripts/CycleTimeDisplayAll.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Scripts/CycleTimeDisplayAll.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Scripts/CycleTimeDisplayAll.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
# This example shows how to quickly calculate the cycle time of all programs in the RoboDK station
#
# Important notes and tips for accurate cycle time calculation:
# https://robodk.com/doc/en/General.html#CycleTime
# Start the RoboDK API
from robodk.robolink import * # RoboDK API
RDK = Robolink()
writeline = "Program name\tProgram status (100%=OK)\tTravel length\tCycle Time"
msg_html = "<table border=1><tr><td>" + writeline.replace('\t', '</td><td>') + "</td></tr>"
# Ask the user to select a program
#program = RDK.ItemUserPick('Select a program', ITEM_TYPE_PROGRAM)
for program in RDK.ItemList(ITEM_TYPE_PROGRAM):
# Retrieve the robot linked to the selected program
#robot = program.getLink(ITEM_TYPE_ROBOT)
# Output the linear speed, joint speed and time (separated by tabs)
result = program.Update()
instructions, time, travel, ok, error = result
# Print the information
newline = "%s\t%.0f %%\t%.1f mm\t%.1f s" % (program.Name(), ok * 100, travel, time)
print(newline)
msg_html = msg_html + '<tr><td>' + newline.replace('\t', '</td><td>') + '</td></tr>'
msg_html = msg_html + '</table>'
RDK.ShowMessage(msg_html)
| 34.411765
| 98
| 0.689744
|
2f66d6202eb48f20d4769b2f1fae2edf49114bcc
| 1,667
|
py
|
Python
|
StocksBROTHER/emailing.py
|
romainledru/WebBROTHER
|
b212c98e6a59ed80c2f8371cab57c887eab16a09
|
[
"MIT"
] | null | null | null |
StocksBROTHER/emailing.py
|
romainledru/WebBROTHER
|
b212c98e6a59ed80c2f8371cab57c887eab16a09
|
[
"MIT"
] | null | null | null |
StocksBROTHER/emailing.py
|
romainledru/WebBROTHER
|
b212c98e6a59ed80c2f8371cab57c887eab16a09
|
[
"MIT"
] | null | null | null |
import smtplib
class Emailing():
def __init__(self):
self.server = smtplib.SMTP("smtp.gmail.com",587)
self.server.ehlo()
self.server.starttls()
self.server.ehlo()
self.destination = self.takeData("txt.username")
self.server.login(self.takeData("txt.username"),self.takeData("txt.pswd"))
        # Username and password are not shared in the repository.
        # The file is not otherwise secured: it is a plain text file with a different extension to discourage the user from opening it :)
def takeData(self,dataName):
"""Security Function: sensible values are taken from external files
Args:
dataName (str): file path
Returns:
str: content of the file
"""
with open(dataName,"r") as file:
data = file.read()
file.close()
return data
def send(self,url,destination=None): # TO USER: by default, destination = sender. You can change the destination by adding an argument in the function call (in run.py)
"""Emailing Function: content of email is edited here
Args:
destination (str): email Adress from destination
url (str): this url link from the interesant stock is provided in the email
"""
if destination == None:
destination = self.destination
subject = "Prices are falling !"
body = "Check out the link: {}".format(url)
msg = f"Subject: {subject}\n\n{body}"
self.server.sendmail(self.takeData("txt.username"),destination,msg)
print("Email Sent")
self.server.quit()
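# A minimal usage sketch, assuming txt.username and txt.pswd exist alongside the
# script and hold valid Gmail credentials; the URL below is a placeholder.
if __name__ == "__main__":
    Emailing().send("https://example.com/stock/XYZ")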
| 30.87037
| 171
| 0.59988
|
b1efe502d0fdfce3d84a7fd294898e4a6b4cbcf3
| 1,182
|
py
|
Python
|
state_machine/example_cqi.py
|
Stvad/pyconie_dsl
|
ef132b4cec9a8ae130732b690bf1003342af3c31
|
[
"Apache-2.0"
] | 2
|
2018-11-07T17:43:57.000Z
|
2020-12-07T13:13:11.000Z
|
state_machine/example_cqi.py
|
Stvad/pyconie_dsl
|
ef132b4cec9a8ae130732b690bf1003342af3c31
|
[
"Apache-2.0"
] | null | null | null |
state_machine/example_cqi.py
|
Stvad/pyconie_dsl
|
ef132b4cec9a8ae130732b690bf1003342af3c31
|
[
"Apache-2.0"
] | 1
|
2020-01-23T22:15:28.000Z
|
2020-01-23T22:15:28.000Z
|
from .model import State, Event, Action, Transition, StateMachine
tv_on = Event('tvOn', 'TVON')
couch_active = Event('couchActive', 'COAC')
it_is_evening = Event('itIsEvening', 'ITEV')
new_day = Event('newDay', 'NEDY')
turn_off_tv = Action('turnOffTv', 'TTOF')
order_pizza = Action('orderPizza', 'OPZZ')
idle = State('idle')
active = State('active')
waiting_for_couch = State('waitingForCouch')
waiting_for_tv = State('waitingForTv')
pizza_is_coming = State('pizzaIsComing')
idle.add_transition(Transition(idle, active, it_is_evening))
idle.add_actions(turn_off_tv)
active.add_transition(Transition(active, waiting_for_couch, tv_on))
active.add_transition(Transition(active, waiting_for_tv, couch_active))
waiting_for_couch.add_transition(Transition(waiting_for_couch, pizza_is_coming, couch_active))
waiting_for_tv.add_transition(Transition(waiting_for_tv, pizza_is_coming, tv_on))
pizza_is_coming.add_transition(Transition(pizza_is_coming, idle, new_day))
pizza_is_coming.add_actions(order_pizza)
machine = StateMachine(idle, idle, {new_day})
machine.handle(tv_on)
machine.handle(it_is_evening)
machine.handle(tv_on)
machine.handle(tv_on)
machine.handle(couch_active)
| 30.307692
| 94
| 0.799492
|
6ef362ed2a145ca664bdfcc7750c98831f6e2443
| 675
|
py
|
Python
|
sked/migrations/0001_initial.py
|
ABASystems/django-sked
|
48a31291c9735b48fa734bf55b8697abc14788bb
|
[
"BSD-3-Clause"
] | null | null | null |
sked/migrations/0001_initial.py
|
ABASystems/django-sked
|
48a31291c9735b48fa734bf55b8697abc14788bb
|
[
"BSD-3-Clause"
] | null | null | null |
sked/migrations/0001_initial.py
|
ABASystems/django-sked
|
48a31291c9735b48fa734bf55b8697abc14788bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-05 22:26
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Accrual',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateField()),
('values', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
),
]
| 25.961538
| 114
| 0.608889
|
9c0bec9e297261e9fac2959269db9758bd9b7f47
| 26,449
|
py
|
Python
|
representation_batch_rl/representation_batch_rl/pse_pixels.py
|
xxdreck/google-research
|
dac724bc2b9362d65c26747a8754504fe4c615f8
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
representation_batch_rl/representation_batch_rl/pse_pixels.py
|
xxdreck/google-research
|
dac724bc2b9362d65c26747a8754504fe4c615f8
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
representation_batch_rl/representation_batch_rl/pse_pixels.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Implementation of PSEs (based on https://github.com/google-research/google-research/blob/master/pse/dm_control/agents/pse_drq_agent.py)."""
import typing
from dm_env import specs as dm_env_specs
import numpy as np
from seed_rl.agents.policy_gradient.modules import popart
from seed_rl.agents.policy_gradient.modules import running_statistics
import tensorflow as tf
from tf_agents.specs.tensor_spec import TensorSpec
from representation_batch_rl.batch_rl import critic
from representation_batch_rl.batch_rl.encoders import ConvStack
from representation_batch_rl.batch_rl.encoders import ImageEncoder
from representation_batch_rl.batch_rl.encoders import make_impala_cnn_network
from representation_batch_rl.representation_batch_rl import tf_utils
EPS = 1e-8
@tf.function
def cosine_similarity(x, y):
"""Computes cosine similarity between all pairs of vectors in x and y."""
x_expanded, y_expanded = x[:, tf.newaxis], y[tf.newaxis, :]
similarity_matrix = tf.reduce_sum(x_expanded * y_expanded, axis=-1)
similarity_matrix /= (
tf.norm(x_expanded, axis=-1) * tf.norm(y_expanded, axis=-1) + EPS)
return similarity_matrix
@tf.function
def sample_indices(dim_x, size=128, sort=False):
dim_x = tf.cast(dim_x, tf.int32)
indices = tf.range(0, dim_x, dtype=tf.int32)
indices = tf.random.shuffle(indices)[:size]
if sort:
indices = tf.sort(indices)
return indices
@tf.function
def representation_alignment_loss(representation_1,
representation_2,
metric_vals,
use_coupling_weights=False,
coupling_temperature=0.1,
return_representation=False,
temperature=1.0):
"""PSE loss. Refer to https://github.com/google-research/google-research/blob/master/pse/dm_control/utils/helper_utils.py#L54 .""" # pylint: disable=line-too-long
if np.random.randint(2) == 1:
representation_1, representation_2 = representation_2, representation_1
metric_vals = tf.transpose(metric_vals)
indices = sample_indices(tf.shape(metric_vals)[0], sort=return_representation)
metric_vals = tf.gather(metric_vals, indices, axis=0)
similarity_matrix = cosine_similarity(representation_1, representation_2)
alignment_loss = contrastive_loss(
similarity_matrix,
metric_vals,
temperature,
coupling_temperature=coupling_temperature,
use_coupling_weights=use_coupling_weights)
if return_representation:
return alignment_loss, similarity_matrix
else:
return alignment_loss
@tf.function
def contrastive_loss(similarity_matrix,
metric_values,
temperature,
coupling_temperature=1.0,
use_coupling_weights=True):
"""Contrative Loss with soft coupling."""
assert temperature > 0.
metric_shape = tf.shape(metric_values)
similarity_matrix /= temperature
neg_logits1 = similarity_matrix
col_indices = tf.cast(tf.argmin(metric_values, axis=1), dtype=tf.int32)
pos_indices1 = tf.stack(
(tf.range(metric_shape[0], dtype=tf.int32), col_indices), axis=1)
pos_logits1 = tf.gather_nd(similarity_matrix, pos_indices1)
if use_coupling_weights:
metric_values /= coupling_temperature
coupling = tf.exp(-metric_values)
pos_weights1 = -tf.gather_nd(metric_values, pos_indices1)
pos_logits1 += pos_weights1
negative_weights = tf.math.log((1.0 - coupling) + EPS)
neg_logits1 += tf.tensor_scatter_nd_update(negative_weights, pos_indices1,
pos_weights1)
neg_logits1 = tf.math.reduce_logsumexp(neg_logits1, axis=1)
return tf.reduce_mean(neg_logits1 - pos_logits1)
def _get_action(replay):
if isinstance(replay, list):
return np.array([x.action for x in replay])
else:
return replay.action
def _calculate_action_cost_matrix(ac1, ac2):
diff = tf.expand_dims(ac1, axis=1) - tf.expand_dims(ac2, axis=0)
return tf.cast(tf.reduce_mean(tf.abs(diff), axis=-1), dtype=tf.float32)
def metric_fixed_point_fast(cost_matrix, gamma=0.99, eps=1e-7):
"""Dynamic prograaming for calculating PSM."""
d = np.zeros_like(cost_matrix)
def operator(d_cur):
d_new = 1 * cost_matrix
discounted_d_cur = gamma * d_cur
d_new[:-1, :-1] += discounted_d_cur[1:, 1:]
d_new[:-1, -1] += discounted_d_cur[1:, -1]
d_new[-1, :-1] += discounted_d_cur[-1, 1:]
return d_new
while True:
d_new = operator(d)
if np.sum(np.abs(d - d_new)) < eps:
break
else:
d = d_new[:]
return d
def compute_metric(actions1, actions2, gamma):
action_cost = _calculate_action_cost_matrix(actions1, actions2)
return tf_metric_fixed_point(action_cost, gamma=gamma)
@tf.function
def tf_metric_fixed_point(action_cost_matrix, gamma):
return tf.numpy_function(
metric_fixed_point_fast, [action_cost_matrix, gamma], Tout=tf.float32)
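# A small sketch of the PSM computation above on two short one-hot action
# sequences; the sequences and the discount factor are illustrative only.
if __name__ == '__main__':
  _example_actions1 = tf.constant([[1., 0.], [0., 1.], [0., 1.]])
  _example_actions2 = tf.constant([[1., 0.], [1., 0.], [0., 1.]])
  # Entry (i, j) is the per-step action cost at (i, j) plus gamma times the cost
  # of the next-step pair, iterated to a fixed point by metric_fixed_point_fast.
  print(compute_metric(_example_actions1, _example_actions2, gamma=0.9))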
class PSE(object):
"""Class performing CQL training."""
def __init__(self,
observation_spec,
action_spec,
actor_lr = 1e-4,
critic_lr = 3e-4,
discount = 0.99,
tau = 0.005,
target_entropy = 0.0,
reg = 0.0,
num_cql_actions = 10,
embedding_dim = 512,
bc_pretraining_steps = 40_000,
min_q_weight = 10.0,
num_augmentations = 1,
rep_learn_keywords = 'outer',
batch_size = 256,
temperature = 1.):
"""Creates networks.
Args:
observation_spec: environment observation spec.
action_spec: Action spec.
actor_lr: Actor learning rate.
critic_lr: Critic learning rate.
discount: MDP discount.
tau: Soft target update parameter.
target_entropy: Target entropy.
reg: Coefficient for out of distribution regularization.
num_cql_actions: Number of actions to sample for CQL loss.
embedding_dim: Size of embedding (now hardcoded)
bc_pretraining_steps: Use BC loss instead of CQL loss for N steps.
min_q_weight: CQL alpha.
num_augmentations: Num of random crops
rep_learn_keywords: Representation learning loss to add.
batch_size: Batch size
temperature: NCE softmax temperature
"""
del embedding_dim
self.num_augmentations = num_augmentations
self.batch_size = batch_size
self.rep_learn_keywords = rep_learn_keywords.split('__')
self.temperature = temperature
actor_kwargs = {}
critic_kwargs = {}
if observation_spec.shape == (64, 64, 3):
# IMPALA for Procgen
def conv_stack():
return make_impala_cnn_network(
depths=[16, 32, 32], use_batch_norm=False, dropout_rate=0.)
state_dim = 256
else:
# Reduced architecture for DMC
def conv_stack():
return ConvStack(observation_spec.shape)
state_dim = 50
conv_stack_bc = conv_stack()
conv_stack_actor = conv_stack()
conv_stack_critic = conv_stack()
conv_target_stack_critic = conv_stack()
if observation_spec.shape == (64, 64, 3):
conv_stack_bc.output_size = state_dim
conv_stack_actor.output_size = state_dim
conv_stack_critic.output_size = state_dim
conv_target_stack_critic.output_size = state_dim
# Combine and stop_grad some of the above conv stacks
actor_kwargs['encoder_bc'] = ImageEncoder(
conv_stack_bc, feature_dim=state_dim, bprop_conv_stack=True)
actor_kwargs['encoder'] = ImageEncoder(
conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=False)
critic_kwargs['encoder'] = ImageEncoder(
conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)
# Note: the target critic does not share any weights.
critic_kwargs['encoder_target'] = ImageEncoder(
conv_target_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)
conv_stack_critic_per_level = conv_stack()
conv_target_stack_critic_per_level = conv_stack()
if observation_spec.shape == (64, 64, 3):
conv_stack_critic_per_level.output_size = state_dim
conv_target_stack_critic_per_level.output_size = state_dim
self.encoder_per_level = ImageEncoder(
conv_stack_critic_per_level,
feature_dim=state_dim,
bprop_conv_stack=True)
self.encoder_per_level_target = ImageEncoder(
conv_target_stack_critic_per_level,
feature_dim=state_dim,
bprop_conv_stack=True)
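    # tau=1.0 makes this soft update a hard copy: the target per-level encoder
    # starts from the same weights as the online per-level encoder.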
critic.soft_update(
self.encoder_per_level, self.encoder_per_level_target, tau=1.0)
if self.num_augmentations == 0:
dummy_state = tf.constant(
np.zeros(shape=[1] + list(observation_spec.shape)))
else: # account for padding of +4 everywhere and then cropping out 68
dummy_state = tf.constant(np.zeros(shape=[1, 68, 68, 3]))
@tf.function
def init_models():
actor_kwargs['encoder_bc'](dummy_state)
actor_kwargs['encoder'](dummy_state)
critic_kwargs['encoder'](dummy_state)
critic_kwargs['encoder_target'](dummy_state)
self.encoder_per_level(dummy_state)
self.encoder_per_level_target(dummy_state)
init_models()
hidden_dims = (256, 256)
# self.actor = policies.CategoricalPolicy(state_dim, action_spec,
# hidden_dims=hidden_dims, encoder=actor_kwargs['encoder'])
action_dim = action_spec.maximum.item() + 1
self.action_dim = action_dim
self.output_dim_level = action_dim
self.log_alpha = tf.Variable(tf.math.log(1.0), trainable=True)
self.log_cql_alpha = self.log_alpha
self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)
self.critic = critic.Critic(
state_dim,
action_dim,
hidden_dims=hidden_dims,
encoder=critic_kwargs['encoder'],
discrete_actions=True,
linear='linear_Q' in self.rep_learn_keywords)
self.critic_target = critic.Critic(
state_dim,
action_dim,
hidden_dims=hidden_dims,
encoder=critic_kwargs['encoder_target'],
discrete_actions=True,
linear='linear_Q' in self.rep_learn_keywords)
self.latent_dim = 256
self.embedding = tf.keras.Sequential([
tf.keras.layers.Dense(self.latent_dim, use_bias=True),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(self.latent_dim, use_bias=True)
],
name='embedding')
dummy_enc = critic_kwargs['encoder'](dummy_state)
hidden_dims_per_level = (256, 256)
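    # Per-level ("task") critics: one Q-value head per (level, action) pair.
    # The factor of 200 is assumed here to correspond to the number of distinct
    # training levels (e.g. a Procgen-style training set); this is a hedged
    # reading, not stated explicitly in the original code.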
self.task_critic_one = critic.Critic(
state_dim,
action_dim * 200,
hidden_dims=hidden_dims_per_level,
encoder=None, # critic_kwargs['encoder'],
discrete_actions=True,
cross_norm=False)
self.task_critic_target_one = critic.Critic(
state_dim,
action_dim * 200,
hidden_dims=hidden_dims_per_level,
encoder=None, # critic_kwargs['encoder'],
discrete_actions=True,
cross_norm=False)
self.task_critic_one(
dummy_enc,
actions=None,
training=False,
return_features=False,
stop_grad_features=False)
self.task_critic_target_one(
dummy_enc,
actions=None,
training=False,
return_features=False,
stop_grad_features=False)
@tf.function
def init_models2():
dummy_state = tf.zeros((1, 68, 68, 3), dtype=tf.float32)
phi_s = self.critic.encoder(dummy_state)
phi_a = tf.eye(15, dtype=tf.float32)
if 'linear_Q' in self.rep_learn_keywords:
phi2_s = self.critic.critic1.state_encoder(phi_s)
_ = self.critic.critic2.state_encoder(phi_s)
_ = self.critic.critic1.action_encoder(phi_a)
_ = self.critic.critic2.action_encoder(phi_a)
_ = self.critic_target.critic1.state_encoder(phi_s)
_ = self.critic_target.critic2.state_encoder(phi_s)
_ = self.critic_target.critic1.action_encoder(phi_a)
_ = self.critic_target.critic2.action_encoder(phi_a)
self.embedding(phi2_s)
init_models2()
norm_beta = 0.1
self.reward_normalizer = popart.PopArt(
running_statistics.EMAMeanStd(norm_beta))
self.reward_normalizer.init()
critic.soft_update(
self.critic, self.critic_target, tau=1.0)
critic.soft_update(
self.task_critic_one, self.task_critic_target_one, tau=1.0)
self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
self.task_critic_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
self.br_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
self.tau = tau
self.reg = reg
self.target_entropy = target_entropy
self.discount = discount
self.num_cql_actions = num_cql_actions
self.bc_pretraining_steps = bc_pretraining_steps
self.min_q_weight = min_q_weight
self.bc = None
self.model_dict = {
'critic': self.critic,
'critic_target': self.critic_target,
'critic_optimizer': self.critic_optimizer,
'alpha_optimizer': self.alpha_optimizer
}
@tf.function
def fit_task_critics(self, mb_states, mb_actions,
mb_next_states, mb_next_actions,
mb_rewards, mb_discounts,
level_ids):
"""Updates per-level critic parameters.
Args:
mb_states: Batch of states.
mb_actions: Batch of actions.
mb_next_states: Batch of next states.
mb_next_actions: Batch of next actions from training policy.
mb_rewards: Batch of rewards.
mb_discounts: Batch of masks indicating the end of the episodes.
level_ids: Batch of level ids
Returns:
Dictionary with information to track.
"""
mb_rewards = self.reward_normalizer.normalize_target(mb_rewards)
trainable_variables = (self.encoder_per_level.trainable_variables
+ self.task_critic_one.trainable_variables)
next_action_indices = tf.stack([
tf.range(tf.shape(mb_next_actions)[0],
dtype=tf.int32), level_ids * self.output_dim_level +
tf.cast(mb_next_actions, dtype=tf.int32)
],
axis=-1)
action_indices = tf.stack([
tf.range(tf.shape(mb_actions)[0], dtype=tf.int32),
level_ids * self.output_dim_level + tf.cast(mb_actions, dtype=tf.int32)
],
axis=-1)
level_ids = tf.stack([
tf.range(tf.shape(mb_next_actions)[0],
dtype=tf.int32), tf.cast(level_ids, dtype=tf.int32)
],
axis=-1)
if 'parallelPerLevel' in self.rep_learn_keywords:
next_states = [self.encoder_per_level_target(mb_next_states[0])]
next_q1, next_q2 = self.task_critic_target_one(
next_states[0], actions=None)
target_q = tf.expand_dims(
mb_rewards, 1) + self.discount * tf.expand_dims(
mb_discounts, 1) * tf.minimum(next_q1, next_q2)
target_q = tf.gather_nd(target_q, indices=next_action_indices)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(trainable_variables)
states = [self.encoder_per_level(mb_states[0])]
q1_all, q2_all = self.task_critic_one(states[0], actions=None)
q = tf.minimum(q1_all, q2_all)
critic_loss = (
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q1_all, indices=action_indices)) +
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q2_all, indices=action_indices)))
critic_grads = tape.gradient(critic_loss, trainable_variables)
self.task_critic_optimizer.apply_gradients(zip(critic_grads,
trainable_variables))
critic.soft_update(
self.encoder_per_level, self.encoder_per_level_target, tau=self.tau)
critic.soft_update(
self.task_critic_one, self.task_critic_target_one, tau=self.tau)
gn = tf.reduce_mean(
[tf.linalg.norm(v) for v in critic_grads if v is not None])
return {
'avg_level_critic_loss': tf.reduce_mean(critic_loss),
'avg_q': tf.reduce_mean(q),
'level_critic_grad_norm': gn
}
def fit_critic(self, states, actions,
next_states, next_actions, rewards,
discounts):
"""Updates critic parameters.
Args:
states: Batch of states.
actions: Batch of actions.
next_states: Batch of next states.
next_actions: Batch of next actions from training policy.
rewards: Batch of rewards.
discounts: Batch of masks indicating the end of the episodes.
Returns:
Dictionary with information to track.
"""
action_indices = tf.stack(
[tf.range(tf.shape(actions)[0], dtype=tf.int64), actions], axis=-1)
next_action_indices = tf.stack(
[tf.range(tf.shape(next_actions)[0], dtype=tf.int64), next_actions],
axis=-1)
if self.num_augmentations > 0:
target_q = 0.
for i in range(self.num_augmentations):
next_q1_i, next_q2_i = self.critic_target(next_states[i], actions=None)
target_q_i = tf.expand_dims(
rewards, 1) + self.discount * tf.expand_dims(
discounts, 1) * tf.minimum(next_q1_i, next_q2_i)
target_q += target_q_i
target_q /= self.num_augmentations
else:
next_q1, next_q2 = self.critic_target(next_states, actions=None)
target_q = tf.expand_dims(rewards, 1) + self.discount * tf.expand_dims(
discounts, 1) * tf.minimum(next_q1, next_q2)
target_q = tf.gather_nd(target_q, indices=next_action_indices)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.critic.trainable_variables)
if self.num_augmentations > 0:
critic_loss = 0.
q1 = 0.
q2 = 0.
for i in range(self.num_augmentations):
q1_i, q2_i = self.critic(states[i], actions=None)
critic_loss_i = (
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q1_i, indices=action_indices)) +
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q2_i, indices=action_indices)))
q1 += q1_i
q2 += q2_i
critic_loss += critic_loss_i
q1 /= self.num_augmentations
q2 /= self.num_augmentations
critic_loss /= self.num_augmentations
else:
q1, q2 = self.critic(states, actions=None)
q = tf.minimum(q1, q2)
critic_loss = (
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q1, indices=action_indices)) +
tf.losses.mean_squared_error(
target_q, tf.gather_nd(q2, indices=action_indices)))
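      # Conservative Q-Learning regularizer: penalize the log-sum-exp of Q over
      # all actions relative to the Q-value of the action taken in the data.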
cql_logsumexp = tf.reduce_logsumexp(q, 1)
cql_loss = tf.reduce_mean(cql_logsumexp -
tf.gather_nd(q, indices=action_indices))
critic_loss += (self.reg * cql_loss)
critic_grads = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(
zip(critic_grads, self.critic.trainable_variables))
critic.soft_update(self.critic, self.critic_target, tau=self.tau)
return {
'q1': tf.reduce_mean(q1),
'q2': tf.reduce_mean(q2),
'critic_loss': critic_loss,
'cql_loss': cql_loss
}
# @tf.function
def fit_embedding(self, states, actions,
next_states, next_actions, rewards,
discounts, level_ids):
"""Fit representation (pixel encoder).
Args:
states: tf.tensor
actions: tf.tensor
next_states: tf.tensor
next_actions: tf.tensor
rewards: tf.tensor
discounts: tf.tensor
level_ids: tf.tensor, contains level id labels
Returns:
embedding_dict: dict
"""
del next_actions, discounts, rewards, next_states
ssl_variables = (self.critic.trainable_variables +
self.embedding.trainable_variables)
with tf.GradientTape(watch_accessed_variables=True) as tape:
uniques, _, counts = tf.unique_with_counts(level_ids)
uniques = tf.cast(uniques, dtype=tf.int32)
level_order = tf.argsort(counts, direction='DESCENDING')
# Take two most frequent levels in the batch
lvls = tf.gather(uniques, level_order)[:2]
lvl_1, lvl_2 = lvls[0], lvls[1]
min_count = tf.reduce_min(tf.gather(counts, level_order)[:2])
idx_1 = tf.math.equal(level_ids, lvl_1)
idx_2 = tf.math.equal(level_ids, lvl_2)
act1 = tf.one_hot(actions[idx_1][:min_count], 15)
act2 = tf.one_hot(actions[idx_2][:min_count], 15)
representation = self.embedding(self.critic.encoder(states[0]))
representation_1 = representation[idx_1][:min_count]
representation_2 = representation[idx_2][:min_count]
metric_vals = compute_metric(act1, act2, self.discount)
embedding_loss = representation_alignment_loss(
representation_1,
representation_2,
metric_vals,
use_coupling_weights=False,
temperature=self.temperature,
return_representation=False)
br_grads = tape.gradient(embedding_loss, ssl_variables)
self.br_optimizer.apply_gradients(zip(br_grads, ssl_variables))
gn = tf.reduce_mean([tf.linalg.norm(v) for v in br_grads if v is not None])
return {
'embedding_loss': embedding_loss,
'embedding_grad_norm': gn
}
# @tf.function
def update_step(self, replay_buffer_iter, train_target='both'):
"""Performs a single training step for critic and embedding.
Args:
      replay_buffer_iter: A tensorflow graph iterable object.
      train_target: string specifying whether to update the RL objective, the representation, or both.
Returns:
Dictionary with losses to track.
"""
transition = next(replay_buffer_iter)
numpy_dataset = isinstance(replay_buffer_iter, np.ndarray)
# observation: n_batch x n_timesteps x 1 x H*W*3*n_frames x 1
# -> n_batch x H x W x 3*n_frames
if not numpy_dataset:
states = transition.observation[:, 0]
next_states = transition.observation[:, 1]
actions = transition.action[:, 0]
rewards = transition.reward
level_ids = transition.policy_info[:, 0]
if tf.shape(transition.reward)[1] > 2:
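        # Multi-step return: collapse the reward sequence into a single
        # discounted sum, sum_t gamma^t * r_t, before bootstrapping.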
rewards = tf.einsum(
'ij,j->i', rewards,
self.discount**tf.range(
0, tf.shape(transition.reward)[1], dtype=tf.float32))
self.n_step_rewards = tf.shape(transition.reward)[1]
else:
rewards = transition.reward[:, 0]
self.n_step_rewards = 1
discounts = transition.discount[:, 0]
if transition.observation.dtype == tf.uint8:
states = tf.cast(states, tf.float32) / 255.
next_states = tf.cast(next_states, tf.float32) / 255.
else:
states, actions, rewards, next_states, discounts = transition
self.reward_normalizer.update_normalization_statistics(rewards)
if self.num_augmentations > 0:
states, next_states = tf_utils.image_aug(
states,
next_states,
img_pad=4,
num_augmentations=self.num_augmentations,
obs_dim=64,
channels=3,
cropped_shape=[self.batch_size, 68, 68, 3])
next_actions_pi = self.act(next_states, data_aug=True)
next_actions_mu = transition.action[:, 1] # pylint: disable=unused-variable
next_actions_pi_per_level = next_actions_mu
states_b1 = states
next_states_b1 = next_states
actions_b1 = actions
next_actions_b1 = next_actions_pi
rewards_b1 = rewards
discounts_b1 = discounts
level_ids_b1 = level_ids
states_b2 = states
next_states_b2 = next_states
actions_b2 = actions
next_actions_b2 = next_actions_pi
rewards_b2 = rewards
discounts_b2 = discounts
if train_target == 'encoder':
print('Updating per-task critics')
critic_distillation_dict = self.fit_task_critics(
states_b1, actions_b1, next_states_b1, next_actions_pi_per_level,
rewards_b1,
discounts_b1, level_ids_b1)
print('Done updating per-task critics')
ssl_dict = {}
critic_dict = {}
return {**ssl_dict, **critic_distillation_dict}
elif train_target == 'rl':
critic_distillation_dict = {}
print('Updating critic')
critic_dict = self.fit_critic(states_b2, actions_b2, next_states_b2,
next_actions_b2, rewards_b2, discounts_b2)
print('Updating embedding')
ssl_dict = self.fit_embedding(states_b1, actions_b1, next_states_b1,
next_actions_b1, rewards_b1, discounts_b1,
level_ids)
print('Done')
return {**ssl_dict, **critic_dict, **critic_distillation_dict}
@tf.function
def act(self, states, data_aug=False):
"""Act with batch of states.
Args:
states: tf.tensor n_batch x 64 x 64 x 3
data_aug: bool, whether to use stochastic data aug (else deterministic)
Returns:
action: tf.tensor
"""
if data_aug and self.num_augmentations > 0:
states = states[0]
if self.num_augmentations > 0:
# use pad of 2 to bump 64 to 68 with 2 + 64 + 2 on each side
img_pad = 2
paddings = tf.constant(
[[0, 0], [img_pad, img_pad], [img_pad, img_pad], [0, 0]],
dtype=tf.int32)
states = tf.cast(
tf.pad(tf.cast(states * 255., tf.int32), paddings, 'SYMMETRIC'),
tf.float32) / 255.
q1, q2 = self.critic(states, actions=None)
q = tf.minimum(q1, q2)
actions = tf.argmax(q, -1)
return actions
| 35.549731
| 165
| 0.666037
|
82a772982bc6a81f416d6aa672e49f389fefebb0
| 422
|
py
|
Python
|
plasmapy/formulary/__init__.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-14T16:35:02.000Z
|
2020-02-14T16:35:02.000Z
|
plasmapy/formulary/__init__.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/formulary/__init__.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
The `~plasmapy.formulary` subpackage contains commonly used formulae
from plasma science.
"""
from .braginskii import *
from .collisions import *
from .dielectric import *
from .dimensionless import *
from .dispersionfunction import *
from .distribution import *
from .drifts import *
from .magnetostatics import *
from .mathematics import *
from .parameters import *
from .quantum import *
from .relativity import *
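# Hedged usage sketch (not part of the package): the star imports above expose
# individual formulae, e.g. `Debye_length` from `parameters.py`, directly at the
# `plasmapy.formulary` level. Arguments must carry astropy units; the exact
# signature and the numbers below are assumptions for illustration only.
def _example_debye_length():
    import astropy.units as u

    return Debye_length(1e6 * u.K, 1e18 * u.m ** -3)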
| 23.444444
| 68
| 0.772512
|
ff20e20720de8a2dd248f3ad1a2044ca4f0864f5
| 2,400
|
py
|
Python
|
setup.py
|
MartinHjelmare/cam_acq
|
3a699f621bb0aa7729c8378f52aac647b7acbf3d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
MartinHjelmare/cam_acq
|
3a699f621bb0aa7729c8378f52aac647b7acbf3d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
MartinHjelmare/cam_acq
|
3a699f621bb0aa7729c8378f52aac647b7acbf3d
|
[
"Apache-2.0"
] | null | null | null |
"""Set up file for camacq package."""
from pathlib import Path
from setuptools import find_packages, setup
PROJECT_DIR = Path(__file__).parent.resolve()
VERSION = (PROJECT_DIR / "camacq" / "VERSION").read_text(encoding="utf-8").strip()
GITHUB_URL = "https://github.com/CellProfiling/cam_acq"
REQUIRES = [
"async_timeout",
"colorlog",
"jinja2",
"leicacam>=0.4.0",
"leicaimage",
"numpy",
"ruamel.yaml>=0.15",
"tifffile",
"voluptuous",
"xmltodict",
]
README_FILE = PROJECT_DIR / "README.md"
LONG_DESCRIPTION = README_FILE.read_text(encoding="utf-8")
DOWNLOAD_URL = f"{GITHUB_URL}/archive/master.zip"
CLASSIFIERS = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: Apache Software License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
CONFIG = {
"description": "Control microscope through client server program.",
"long_description": LONG_DESCRIPTION,
"long_description_content_type": "text/markdown",
"author": "Martin Hjelmare",
"url": GITHUB_URL,
"download_url": DOWNLOAD_URL,
"license": "Apache-2.0",
"author_email": "marhje52@gmail.com",
"version": VERSION,
"python_requires": ">=3.8",
"install_requires": REQUIRES,
"packages": find_packages(exclude=["contrib", "docs", "tests*"]),
"include_package_data": True,
"entry_points": {
"console_scripts": ["camacq = camacq.__main__:main"],
"camacq.plugins": [
"api = camacq.plugins.api",
"automations = camacq.plugins.automations",
"leica = camacq.plugins.leica",
"rename_image = camacq.plugins.rename_image",
"sample = camacq.plugins.sample",
],
},
"name": "camacq",
"zip_safe": False,
"classifiers": CLASSIFIERS,
}
setup(**CONFIG)
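# Hedged sketch (not part of the original setup script): how the
# "camacq.plugins" entry points declared in CONFIG can be discovered at runtime
# once the package is installed. The importlib.metadata API differs slightly
# between Python 3.8/3.9 and 3.10+, hence the fallback below.
def discover_camacq_plugins():
    """Return a mapping of plugin name to loaded module for camacq plugins."""
    from importlib.metadata import entry_points

    eps = entry_points()
    group = (
        eps.select(group="camacq.plugins")
        if hasattr(eps, "select")
        else eps.get("camacq.plugins", [])
    )
    return {ep.name: ep.load() for ep in group}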
| 32
| 82
| 0.6475
|
f79ad7b14bff5001e18640b3929db9a8296e528e
| 584
|
py
|
Python
|
model/builder.py
|
BrokenShell/LabsStarter
|
04c11aa4d7149f38ee5597cab46ea3ed0408ccf3
|
[
"MIT"
] | null | null | null |
model/builder.py
|
BrokenShell/LabsStarter
|
04c11aa4d7149f38ee5597cab46ea3ed0408ccf3
|
[
"MIT"
] | null | null | null |
model/builder.py
|
BrokenShell/LabsStarter
|
04c11aa4d7149f38ee5597cab46ea3ed0408ccf3
|
[
"MIT"
] | null | null | null |
from sklearn import svm, datasets
from joblib import dump
from sklearn.model_selection import train_test_split
X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y,
test_size=0.2,
stratify=y,
random_state=42,
)
model = svm.SVC(
class_weight='balanced',
probability=True,
random_state=42,
)
model.fit(X_train, y_train)
dump(model, '../project/app/model.joblib')
print(f"Training Accuracy: {100 * model.score(X_train, y_train):.2f}%")
print(f"Validation Accuracy: {100 * model.score(X_test, y_test):.2f}%")
| 25.391304
| 71
| 0.72089
|
60efc6a80b7f176cdf44400ccadadbd7b0a34983
| 43,528
|
py
|
Python
|
experiment_specific/transient_attention/HolcombeKristjansson.py
|
alexholcombe/MOTcircular
|
6b4cfbf58a641a6b20b38e9acda11b114a60c060
|
[
"MIT"
] | 2
|
2016-01-11T19:51:58.000Z
|
2019-11-25T12:04:29.000Z
|
experiment_specific/transient_attention/HolcombeKristjansson.py
|
alexholcombe/MOTcircular
|
6b4cfbf58a641a6b20b38e9acda11b114a60c060
|
[
"MIT"
] | null | null | null |
experiment_specific/transient_attention/HolcombeKristjansson.py
|
alexholcombe/MOTcircular
|
6b4cfbf58a641a6b20b38e9acda11b114a60c060
|
[
"MIT"
] | 2
|
2016-01-11T19:52:00.000Z
|
2020-05-15T13:34:10.000Z
|
from __future__ import print_function
from __future__ import division
__author__ = """Alex "O." Holcombe""" ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import helpersAOH
from psychopy import *
import psychopy.info
from psychopy import sound, monitors, logging
import numpy as np
import itertools #to calculate all subsets
from copy import deepcopy
from math import atan, pi, cos, sin, sqrt, ceil, atan2
import time, sys, platform, os, StringIO, gc, random
eyetrackingOption = True #Include this so can turn it off, because Psychopy v1.83.01 mistakenly included an old version of pylink which prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing
if eyetrackingOption:
from EyelinkEyetrackerForPsychopySUPA3 import Tracker_EyeLink #Chris Fajou integration
from helpersAOH import accelerateComputer, openMyStimWindow, constructThickThinWedgeRingsTargetAndCue
eyetracking = False
getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment = False #If True, can take up to 1.5 hrs in certain conditions
quitFinder = True
if quitFinder:
applescript="\'tell application \"Finder\" to quit\'" #quit Finder.
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
process_priority = 'realtime' # 'normal' 'high' or 'realtime'
disable_gc = True
subject='test'#'test'
autoLogging = False
demo = False
autopilot=False
if autopilot: subject='auto'
feedback=True
exportImages= False #quits after one trial / output image
screenshot= False; screenshotDone = False;allowGUI = False;waitBlank = False
trackAllIdenticalColors = True#with tracking, can either use same colors as other task (e.g. 6 blobs but only 3 colors so have to track one of 2) or set all blobs identical color
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
respTypes=['order']; respType=respTypes[0]
bindRadiallyRingToIdentify=1 #0 is inner, 1 is outer
gratingTexPix=1024 #numpy textures must be a power of 2. So, if numColorsRoundTheRing does not divide evenly into gratingTexPix, there will be some rounding and patches will not all be the same size
numRings=2
radii=[25] #Need to encode as array for those experiments wherein more than one ring presented
respRadius=radii[0] #deg
refreshRate= 60 #85 #set to the framerate of the monitor
useClock = False #as opposed to using frame count, which assumes no frames are ever missed
fullscr=1; #show in small window (0) or full screen (1)
scrn=0 #which screen to display the stimuli. 0 is home screen, 1 is second screen
# create a dialog from dictionary
infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Screen to use':scrn, 'Fullscreen (timing errors if not)': fullscr, 'Screen refresh rate': refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='MOT',
order=['Autopilot','Check refresh etc', 'Screen to use', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
'Screen to use': '0 means primary screen, 1 means second screen'},
)
if not OK.OK:
print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
autopilot = infoFirst['Autopilot']
checkRefreshEtc = infoFirst['Check refresh etc']
scrn = infoFirst['Screen to use']
print('scrn = ',scrn, ' from dialog box')
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if demo: refreshRate = 60.
tokenChosenEachRing= [-999]*numRings
targetDur = 1/refreshRate * 5#2 #duration of target (in seconds)
targetDur = round(targetDur * refreshRate) / refreshRate #discretize to nearest integer number of refreshes
rampUpDur=0
rampUpFrames = refreshRate*rampUpDur
ballStdDev = 1.8
mouseChoiceArea = ballStdDev*0.8 # origin =1.3
units='deg' #'cm'
timeTillReversalMin = 0.5 #0.5;
timeTillReversalMax = 1.5# 1.3 #2.9
colors_all = np.array([[1,-1,-1],[1,-1,-1]])
cueColor = np.array([1,1,1])
#monitor parameters
widthPix = 1024 #1440 #monitor width in pixels
heightPix = 768 #900 #monitor height in pixels
monitorwidth = 40.5 #28.5 #monitor width in centimeters
viewdist = 55.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
bgColor = [0,0,0] # [-1,-1,-1] #black background
monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
if exportImages:
fullscr=0; scrn=0
widthPix = 600; heightPix = 450
monitorwidth = 25.0
if demo:
scrn=0; fullscr=0
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
monitorwidth = 23#18.0
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
myMouse = event.Mouse(visible = 'true',win=myWin)
myWin.setRecordFrameIntervals(False)
trialsPerCondition = 2 #default value
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('Finished runInfo- which assesses the refresh and processes of this computer')
refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
refreshRateTolerancePct = 3
pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
dlgLabelsOrdered = list() #new dialog box
myDlg = gui.Dlg(title="object tracking experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name :', subject, tip='or subject code')
dlgLabelsOrdered.append('subject')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
msgWrongResolution = ''
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
myDlg.addText(msgWrongResolution, color='Red')
print(msgWrongResolution); logging.info(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trial\'s response screen', color='DimGrey') # color names stopped working along the way, for an unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition ='+str(trialsPerCondition))
else:
print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
logging.flush()
core.quit()
if os.path.isdir('.'+os.sep+'dataRaw'):
dataDir='dataRaw'
else:
msg= 'dataRaw directory does not exist, so saving data in present working directory'
print(msg); logging.info(msg)
dataDir='.'
expname = ''
fileNameWithPath = dataDir+'/'+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
#also save helpersAOH.py because it has critical drawing commands
saveCodeCmd = 'cp \'' + 'helpersAOH.py' + '\' '+ fileNameWithPath + '_helpersAOH.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logF = logging.LogFile(fileNameWithPath+'.log',
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = .01 #0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
logging.error(msgWrongResolution)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
msg='Window opened'; print(msg); logging.info(msg)
myMouse = event.Mouse(visible = 'true',win=myWin)
msg='Mouse enabled'; print(msg); logging.info(msg)
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
eyeballRadius = 5
eyeball = visual.Circle(myWin, radius=eyeballRadius, edges=32, fillColorSpace='rgb',fillColor = (1,0,1),autoLog=autoLogging) #to outline chosen options
gaussian = visual.PatchStim(myWin, tex='none',mask='gauss',colorSpace='rgb',size=ballStdDev,autoLog=autoLogging)
gaussian2 = visual.PatchStim(myWin, tex='none',mask='gauss',colorSpace='rgb',size=ballStdDev,autoLog=autoLogging)
optionChosenCircle = visual.Circle(myWin, radius=mouseChoiceArea, edges=32, fillColorSpace='rgb',fillColor = (1,0,1),autoLog=autoLogging) #to outline chosen options
clickableRegion = visual.Circle(myWin, radius=0.5, edges=32, fillColorSpace='rgb',fillColor = (-1,1,-1),autoLog=autoLogging) #to show clickable zones
circlePostCue = visual.Circle(myWin, radius=2*radii[0], edges=32, fillColorSpace='rgb',fillColor = (-.85,-.85,-.85),lineColor=(-1,-1,-1),autoLog=autoLogging) #visual postcue
#referenceCircle allows visualisation of trajectory, mostly for debugging
referenceCircle = visual.Circle(myWin, radius=radii[0], edges=32, fillColorSpace='rgb',lineColor=(-1,-1,1),autoLog=autoLogging) #visual postcue
blindspotFill = 0 #a way for people to know if they move their eyes
if blindspotFill:
blindspotStim = visual.PatchStim(myWin, tex='none',mask='circle',size=4.8,colorSpace='rgb',color = (-1,1,-1),autoLog=autoLogging) #to outline chosen options
blindspotStim.setPos([13.1,-2.7]) #AOH, size=4.8; pos=[13.1,-2.7] #DL: [13.3,-0.8]
fixatnNoise = False
fixSizePix = 20 #make fixation big so flicker more conspicuous
if fixatnNoise:
numChecksAcross = fixSizePix/4
nearestPowerOfTwo = round( sqrt(numChecksAcross) )**2 #Because textures (created on next line) must be a power of 2
fixatnNoiseTexture = np.round( np.random.rand(nearestPowerOfTwo,nearestPowerOfTwo) ,0 ) *2.0-1
#Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=autoLogging)
fixationCounterphase= visual.PatchStim(myWin, tex=-1*fixatnNoiseTexture, colorSpace='rgb',mask='circle',size=fixSizePix,units='pix',autoLog=autoLogging)
else:
fixation = visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(.3,.3,.3),mask='circle',units='pix',size=fixSizePix,autoLog=autoLogging)
fixationCounterphase= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(-1,-1,-1),mask='circle',units='pix',size=fixSizePix,autoLog=autoLogging)
fixationPoint = visual.Circle(myWin, size=6, fillColor=(1,1,1), units='pix', lineColor=None, autoLog=autoLogging)
fixation.setPos([0,0])
fixationCounterphase.setPos([0,0])
#create noise post-mask
maskDur = 0.5;
individualMaskDurFrames = 5
numChecksAcross = 128
nearestPowerOfTwo = round( sqrt(numChecksAcross) )**2 #Because textures (created on next line) must be a power of 2
noiseMasks = []
numNoiseMasks = int( ceil(maskDur / ((1/refreshRate)*individualMaskDurFrames)) )
for i in xrange(numNoiseMasks):
whiteNoiseTexture = np.round( np.random.rand(nearestPowerOfTwo,nearestPowerOfTwo) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
noiseMask= visual.PatchStim(myWin, tex=whiteNoiseTexture,
size=(widthPix,heightPix*.9),pos=[0,heightPix*.05], units='pix', interpolate=False, autoLog=autoLogging)
noiseMasks.append(noiseMask)
respText = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextText = visual.TextStim(myWin,pos=(0, 0),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextRemindPctDoneText = visual.TextStim(myWin,pos=(-.1, -.4),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(.1, -.5),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
stimList = []
speeds = np.array([1]) #np.array( [ 0, 2 ] ) #don't want to go faster than 2 rps because of blur problem
#Set up the factorial design (list of all conditions)
for numCuesEachRing in [ [1] ]:
for numObjsEachRing in [ [4] ]:#8 #First entry in each sub-list is num objects in the first ring, second entry is num objects in the second ring
for cueLeadTime in [0.020, 0.060, 0.125, 0.167, 0.267, 0.467]: #How long is the cue on prior to the target and distractors appearing
for durMotion in [.5]: #If speed!=0, how long should cue(s) move before stopping and cueLeadTime clock begins
for speed in speeds:
for direction in [-1.0,1.0]:
for targetOffset in [-1.0, 1.0]:
for objToCueQuadrant in range(4):
stimList.append( {'numCuesEachRing':numCuesEachRing,'numObjsEachRing':numObjsEachRing,'targetOffset':targetOffset,
'cueLeadTime':cueLeadTime,'durMotion':durMotion,'speed':speed,'objToCueQuadrant':objToCueQuadrant,'direction':direction} )
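#Hedged note: with the levels above, this factorial design yields 6 cue lead times
#x 2 directions x 2 target offsets x 4 quadrants = 96 unique conditions; the
#TrialHandler below presents trialsPerCondition repeats of each.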
#set up record of proportion correct in various conditions
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
# extraInfo= {'subject':subject} ) #will be included in each row of dataframe and wideText. Not working in v1.82.01
numRightWrongEachSpeed = np.zeros([ len(speeds), 2 ]); #summary results to print out at end
#end setup of record of proportion correct in various conditions
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
logging.info( str('starting exp with name: "'+'MovingCue'+'" at '+timeAndDateStr) )
logging.info( 'numtrials='+ str(trials.nTotal)+' refreshRate='+str(refreshRate) )
print(' numtrials=', trials.nTotal)
logging.info('rampUpDur='+str(rampUpDur)+ ' targetDur='+ str(targetDur) + ' secs')
logging.info('task='+'track'+' respType='+respType)
logging.info( 'radii=' + str(radii) )
logging.flush()
RFcontourAmp= 0.0
RFcontourFreq = 2.0
RFcontourPhase = 0
def RFcontourCalcModulation(angle,freq,phase):
modulation = sin(angle*freq + phase) #radial frequency contour equation, e.g. http://www.journalofvision.org/content/14/11/12.full from Wilkinson et al. 1998
return modulation
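#Illustrative helper (not used below): a full radial-frequency contour combines
#the base radius with the modulation above, r(angle) = r0 * (1 + amp * modulation);
#the trial loop applies the same formula inline when drawing.
def RFcontourRadiusExample(r0, amp, freq, phase, angle):
    return r0 * (1 + amp * RFcontourCalcModulation(angle, freq, phase))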
ampTemporalRadiusModulation = 0.0 # 1.0/3.0
ampModulatnEachRingTemporalPhase = np.random.rand(numRings) * 2*np.pi
def xyThisFrameThisAngle(basicShape, radiiThisTrial, numRing, angle, thisFrameN, speed):
#period of oscillation should be in sec
periodOfRadiusModulation = 1.0/speed#so if speed=2 rps, radius modulation period = 0.5 s
r = radiiThisTrial[numRing]
timeSeconds = thisFrameN / refreshRate
modulatnPhaseRadians = timeSeconds/periodOfRadiusModulation * 2*pi + ampModulatnEachRingTemporalPhase[numRing]
def waveForm(phase,type):
if type=='sin':
return sin(modulatnPhaseRadians)
elif type == 'sqrWave':
ans = np.sign( sin(modulatnPhaseRadians) ) #-1 or 1. That's great because that's also sin min and max
if ans==0: ans = -1+ 2*round( np.random.rand(1)[0] ) #exception case is when 0, gives 0, so randomly change that to -1 or 1
return ans
else: print('Error! unexpected type in radiusThisFrameThisAngle')
if basicShape == 'circle':
rThis = r + waveForm(modulatnPhaseRadians,'sin') * r * ampTemporalRadiusModulation
rThis += r * RFcontourAmp * RFcontourCalcModulation(angle,RFcontourFreq,RFcontourPhase)
x = rThis*cos(angle)
y = rThis*sin(angle)
elif basicShape == 'square': #actual square-shaped trajectory. Could also add all the modulations to this, later
#Theta varies from 0 to 2pi. Instead of taking its cosine, I should just pretend it is linear. Map it to 0->1 with triangle wave
#Want 0 to pi to be -1 to 1
def triangleWave(period, phase):
#triangle wave is in sine phase (starts at 0)
y = -abs(phase % (2*period) - period) # http://stackoverflow.com/questions/1073606/is-there-a-one-line-function-that-generates-a-triangle-wave
#y goes from -period to 0. Need to rescale to -1 to 1 to match sine wave etc.
y = y/period*2 + 1
#Now goes from -1 to 1
return y
x = r * triangleWave(pi,angle)
y = r * triangleWave(pi, (angle-pi/2)%(2*pi ))
#This will always describe a diamond. To change the shape would have to use vector rotation formula
else: print('Unexpected basicShape ',basicShape)
return x,y
def angleChangeThisFrame(thisTrial, moveDirection, numRing, thisFrameN, lastFrameN):
#angleMove is deg of the circle
#speed is in units of revolutions per second
angleMovePerFrame = moveDirection[numRing]*thisTrial['direction']*thisTrial['speed']*360/refreshRate
angleMove = angleMovePerFrame*(thisFrameN-lastFrameN)
#print("angleMovePerFrame = ",angleMovePerFrame,"angleMove=",angleMove)
return angleMove
def oneFrameOfStim(thisTrial,currFrame,lastFrame,maskBegin,cues,stimRings,targetRings,lines,offsetXYeachRing):
#defining a function to draw each frame of stim. So can call second time for tracking task response phase
n=currFrame
if n<rampUpFrames:
contrast = cos( -pi+ pi* n/rampUpFrames ) /2. +.5 #starting from -pi trough of cos, and scale into 0->1 range
else: contrast = 1
if n%2:
fixation.draw()#flicker fixation on and off at framerate to see when skip frame
else:
fixationCounterphase.draw()
fixationPoint.draw()
numRing = 0 #Haven't implemented capability for multiple rings, although started out that way, got rid of it because complexity
#draw cue
cueMovementEndTime = 0
if thisTrial['speed']:
cueMovementEndTime += thisTrial['durMotion']
if n<= cueMovementEndTime*refreshRate: #cue movement interval. Afterwards, cue stationary
angleMove = angleChangeThisFrame(thisTrial, moveDirection, numRing, n, lastFrame)
cues[numRing].setOri(angleMove,operation='+',log=autoLogging)
for line in lines: #move their (eventual) position along with the cue.
eccentricity = sqrt( line.pos[0]**2 + line.pos[1]**2 ) #reverse calculate its eccentricity
currLineAngle = atan2(line.pos[1],line.pos[0]) /pi*180 #calculate its current angle
currLineAngle -= angleMove #subtraction because grating angles go opposite direction to non-gratings
x = cos(currLineAngle/180*pi) * eccentricity
y = sin(currLineAngle/180*pi) * eccentricity
line.setPos( [x,y], log=autoLogging)
#line.draw() #debug, see if it's moving
#print("cueMovementEndTime=",cueMovementEndTime,"n=",n,", in sec=",n/refreshRate, "currLineAngle=",currLineAngle, "cues ori=",cues[numRing].ori) #debugAH
cueCurrAngle = cues[numRing].ori
for cue in cues: cue.draw()
#check whether time to draw target and distractor objects
timeTargetOnset = thisTrial['cueLeadTime']
if thisTrial['speed']>0:
timeTargetOnset += thisTrial['durMotion']
if n >= round(timeTargetOnset*refreshRate): #draw target and distractor objects
linesInsteadOfArcTargets = True
#draw distractor objects
if not linesInsteadOfArcTargets:
for stimRing in stimRings:
stimRing.draw()
#draw target(s)
if not linesInsteadOfArcTargets:
for targetRing in targetRings:
targetRing.draw() #Probably just the new background (to replace the displaced target, and the target
else:
for line in lines:
line.draw()
#if n==1: print("n=",n,"timeTargetOnset = ",timeTargetOnset, "timeTargetOnset frames = ",timeTargetOnset*refreshRate, "cueLeadTime=",thisTrial['cueLeadTime']) #debugAH
if n >= round(maskBegin*refreshRate): #time for mask
howManyFramesIntoMaskInterval = round(n - maskBegin*refreshRate)
whichMask = int( howManyFramesIntoMaskInterval / individualMaskDurFrames ) #increment whichMAsk every maskFramesDur frames
whichMask = whichMask % numNoiseMasks #restart with first if no more are available
#print("individualMaskDurFrames=",individualMaskDurFrames,"howManyFramesIntoMaskInterval=",howManyFramesIntoMaskInterval, " whichMask=",whichMask, "numNoiseMasks = ",numNoiseMasks)
noiseMasks[ int(whichMask) ].draw()
if blindspotFill:
blindspotStim.draw()
return cueCurrAngle
# #######End of function definition that displays the stimuli!!!! #####################################
respPromptText = visual.TextStim(myWin,height=0.04, pos=(0, -.9),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
respPromptText.setText('Press G if the cued circle is black, or L if it is white')
#respPromptText.setText('Press G if the line is like the rim, or L if it is oriented like a spoke')
def collectResponses(expStop): #Kristjansson&Holcombe cuing experiment
#draw the possible stimuli
#eyeball left, eyeball right, eyeball down, eyeball up
#draw something that represents clockwise
responsesNeeded = 1
responsesAutopilot = list(['L'])
for r in range(responsesNeeded):
responsesAutopilot.append('L')
respcount =0
while respcount <responsesNeeded:
respPromptText.draw()
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
key = key.upper()
if key in ['ESCAPE','Q']:
expStop = True
respcount += 1
responses.append('X') #dummy response so dont' get error when try to record in datafile before quitting
elif key.upper() in ['L','G']: #L for towards edge of screen, G for towards ctr #A for anticlockwise, L for clockwise
responses.append( key.upper() )
respcount += 1
else: #flicker response prompt to indicate invalid response
for f in range(2):
myWin.flip(); myWin.flip()
respPromptText.draw()
myWin.flip()
if autopilot:
            respcount = responsesNeeded
break
return responses,responsesAutopilot, expStop
trialNum=0; numTrialsCorrect=0; expStop=False; framesSaved=0;
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
thisTrial = trials.next()
ts = list();
highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.4, bits=8)
highA.setVolume(0.8)
lowD = sound.Sound('E',octave=3, sampleRate=6000, secs=.4, bits=8)
if eyetracking:
if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
while trialNum < trials.nTotal and expStop==False:
accelerateComputer(1,process_priority, disable_gc) #speed up
numObjects = thisTrial['numObjsEachRing'][0] #haven't implemented additional rings yet
objsPerQuadrant = numObjects / 4
if numObjects % 4 != 0:
msg = 'numObjects not evenly divisible by 4, therefore cannot randomise quadrant. Therefore picking object to cue completely randomly'
logging.error(msg); print(msg)
objToCue = np.random.random_integers(0, numObjects-1, size=1)
else:
quadrantObjectToCue = np.random.random_integers(0, objsPerQuadrant-1, size=1)
objToCue = thisTrial['objToCueQuadrant']*objsPerQuadrant + quadrantObjectToCue
#objToCue = np.array([7]); print('HEY objToCue not randomised')
colorRings=list();
preDrawStimToGreasePipeline = list()
isReversed= list([1]) * numRings #always takes values of -1 or 1
reversalNumEachRing = list([0]) * numRings
angleIniEachRing = list( np.random.uniform(0,2*pi,size=[numRings]) )
#angleIniEachRing = list( [0] ); print('HEY angle not randomised')
cueCurrAngleEachRing = list([0]) * numRings
moveDirection = list( np.random.random_integers(0,1,size=[numRings]) *2 -1 ) #randomise initial direction
durExtra = thisTrial['durMotion'] if thisTrial['speed'] else 0 #in motion condition, cue moves for awhile before cue lead time clock starts
maskBegin = thisTrial['cueLeadTime'] + targetDur + durExtra
trialDurTotal = maskBegin + maskDur
trialDurFrames= int( trialDurTotal*refreshRate )
#Task will be to judge which thick wedge has the thin wedge offset within it
#Set up parameters to construct the thick (context),thin (target offset relative to context) wedges
gratingTexPix= 1024
visibleWedge = [0,360]
patchAngleThickWedges = 360/numObjects/2
thickWedgeColor = [1,1,1] # originally [0,-1,-1] #dark red
thinWedgeColor= [-1,-1,-1] #originally [0,0,1] #blue
cueColor=[1,-.9,-.9] #
radialMask = np.array( [0,0,0,0,1,0,0,0,0] ) # [0,0,0,0,0,0,0,1,0,0,0] )
#This is the sliver that's offset relative to the larger wedge, that you have to judge the offset of
radialMaskThinWedge = np.array( [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] )
wedgeRadiusFraction = np.where(radialMask)[0][0]*1.0 / len(radialMask)
#print('wedgeRadiusFraction = ',wedgeRadiusFraction)
wedgeThicknessFraction = len( np.where(radialMask)[0] )*1.0 / len(radialMask)
#print('wedgeThickness = ',wedgeThicknessFraction*radii[0])
wedgeCenterFraction = wedgeRadiusFraction + wedgeThicknessFraction/2.
desiredArcDistanceFractionRadius = 0.10 #.23 #Is this what controls how far apart the two arcs of the cue are?
cueInnerArcDesiredFraction = wedgeCenterFraction - desiredArcDistanceFractionRadius
cueOuterArcDesiredFraction = wedgeCenterFraction + desiredArcDistanceFractionRadius
if cueOuterArcDesiredFraction > 1:
msg='Can"t start outer arc at fraction='+str(cueOuterArcDesiredFraction)
logging.error(msg); print(msg)
fractionResolution = .02 #Quantisation of possible positions of cue arc
binsNeeded = 1.0 / fractionResolution
#setup cue parameters
cueRadialMask = np.zeros( int(binsNeeded) )
#For the cueRadialMask, want everything zero except just inside and outside of the wedges.
innerArcCenterPos = int( round( binsNeeded*cueInnerArcDesiredFraction ) )
outerArcCenterPos = int( round( binsNeeded*cueOuterArcDesiredFraction ) )
cueRadialMask[ innerArcCenterPos ] = 1
cueRadialMask[ outerArcCenterPos ] = 1
innerArcActualFraction = innerArcCenterPos*1.0/len(cueRadialMask)
outerArcActualFraction = outerArcCenterPos*1.0/len(cueRadialMask)
closeEnough = .02
if abs(cueInnerArcDesiredFraction - innerArcActualFraction) > closeEnough:
print('cueInnerArcDesiredFraction of object radius = ',cueInnerArcDesiredFraction, ' actual = ', innerArcActualFraction, ' exceeding tolerance of ',closeEnough )
if abs(cueOuterArcDesiredFraction - outerArcActualFraction) > closeEnough:
print('cueOuterArcDesiredFraction of object radius = ',cueOuterArcDesiredFraction, ' actual = ', outerArcActualFraction, ' exceeding tolerance of ',closeEnough)
thickWedgesRing,thickWedgesRingCopy, thinWedgesRing, targetRing, cueDoubleRing, lines= constructThickThinWedgeRingsTargetAndCue(myWin, \
radii[0],radialMask,radialMaskThinWedge,
cueRadialMask,visibleWedge,numObjects,patchAngleThickWedges,patchAngleThickWedges,
bgColor,thickWedgeColor,thinWedgeColor,0,thisTrial['targetOffset'],gratingTexPix,cueColor,objToCue,ppLog=logging)
#The thickWedgesRing, typically white, are drawn as a radial grating that occupies all 360 deg circular, with a texture to mask out everything else to create a ring
#The thinWedgesRing, typically black, are centered in the white and one of these wedges will be later displaced to create a target.
#The targetRing is the displaced black wedge. Actually a full circular radial grating, but visibleWedge set to subtend only the part where the target is.
#The thickWedgesRingCopy is to draw over the old, undisplaced black wedge, only in the target area. It is thus a copy of the thickWedgesRing,
# with visibleWedge set to show only the target part
#The cueRing is two red arcs to bring attention to the target area.
core.wait(.1)
myMouse.setVisible(False)
if eyetracking:
tracker.startEyeTracking(trialNum,True,widthPix,heightPix) #start recording with eyetracker
event.clearEvents() #clear key and mouseclick buffer
fixatnPeriodFrames = int( (np.random.rand(1)/2.+0.8) *refreshRate) #random interval between x and x+800ms
if (fixatnPeriodFrames-1) % 2 ==0:
fixatnPeriodFrames +=1 #make it odd
for i in range(fixatnPeriodFrames):
if i%2:
fixation.draw()
else: fixationCounterphase.draw()
fixationPoint.draw()
myWin.flip() #clearBuffer=True)
trialClock.reset()
t0=trialClock.getTime(); t=trialClock.getTime()-t0
ts = list()
stimClock.reset()
#print("trialDurFrames=",trialDurFrames,"trialDur=",trialDurFrames/refreshRate) #debug
offsetXYeachRing=[[0,0],[0,0]]
lastFrame = 0 #only used if useClock = True
for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
if useClock: #Don't count on not missing frames. Use actual time.
t = stimClock.getTime()
currFrame = round(t*refreshRate)
else: currFrame = n
cueAngle = \
oneFrameOfStim(thisTrial,currFrame,lastFrame,maskBegin,[cueDoubleRing],[thickWedgesRing,thinWedgesRing],
[thickWedgesRingCopy,targetRing],lines,offsetXYeachRing) #actual drawing of stimuli
lastFrame = currFrame #only used if useClock=True
if exportImages:
myWin.getMovieFrame(buffer='back') #for later saving
framesSaved +=1
myWin.flip(clearBuffer=True)
#if n == round(thisTrial['cueLeadTime']*refreshRate): #debug
# event.waitKeys(maxWait=20, keyList=['SPACE','ESCAPE','x'], timeStamped=False) #debugON
t=trialClock.getTime()-t0; ts.append(t);
myWin.flip()
if eyetracking:
tracker.stopEyeTracking()
#end of big stimulus loop
accelerateComputer(0,process_priority, disable_gc) #turn off stuff that sped everything up
#check for timing problems
interframeIntervs = np.diff(ts)*1000 #difference in time between successive frames, in ms
idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded longerThanRefreshTolerance of expected duration
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong >0:
longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
if longFramesStr != None:
msg = 'trialnum=' + str(trialNum) + longFramesStr
print(msg); logging.info(msg)
if not demo:
flankingAlso=list()
for idx in idxsInterframeLong: #also print timing of one before and one after long frame
if idx-1>=0: flankingAlso.append(idx-1)
else: flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
else: flankingAlso.append(np.NaN)
if np.NaN in flankingAlso: #was the first or last frame
logging.info('was first or last frame')
else:
logging.info( 'flankers also=' + str( np.around(interframeIntervs[flankingAlso],1) ))
#end timing check
passThisTrial=False
# ####### set up and collect responses
responses = list(); responsesAutopilot = list()
responses,responsesAutopilot, expStop = \
collectResponses(expStop) #collect responses!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#####
myMouse.setVisible(True)
core.wait(.1)
if exportImages: #maybe catch one frame of response
myWin.saveMovieFrames('exportedImages/frame.png')
expStop=True
#Handle response, calculate whether correct, ########################################
if autopilot:
responses = responsesAutopilot
#score response
if thisTrial['targetOffset'] >0:
answer = 'L'
else:
answer = 'G'
if responses[0] == answer:
correct = 1
else: correct = 0
if passThisTrial:
correct = -1 #indicate for data analysis that observer opted out of this trial, because think they moved their eyes
#header print('trialnum\tsubject\tbasicShape\tnumObjects\tspeed\tdirection\tangleIni
trials.data.add('subject', subject) #because extraInfo not working
trials.data.add('objToCueRing0', objToCue[0])
    trials.data.add('numObjsRing0', thisTrial['numObjsEachRing'][0])
    trials.data.add('numCuesRing0', thisTrial['numCuesEachRing'][0])
trials.data.add('response', responses[0]) #switching to using psychopy-native ways of storing, saving data
trials.data.add('correct', correct) #switching to using psychopy-native ways of storing, saving data
trials.data.add('timingBlips', numCasesInterframeLong)
numTrialsCorrect += (correct >0) #so count -1 as 0
speedIdxs = np.where(thisTrial['speed']==speeds)[0]
if len(speedIdxs) ==0:
print('Apparently current speed= ',thisTrial['speed'],' is not in list of speeds=',speeds, '. Please make sure speeds is a numpy array')
else: speedIdx = speedIdxs[0] #extract index, where returns a list with first element array of the indexes
numRightWrongEachSpeed[ speedIdx, int(correct >0) ] +=1 #if right, add to 1th column, otherwise add to 0th column count
if feedback and not expStop:
if correct:
highA.setVolume(0.8)
highA.play()
else: #incorrect
lowD.setVolume(0.8)
lowD.play()
core.wait(0.3)
trialNum+=1
waitForKeyPressBetweenTrials = False
if trialNum< trials.nTotal:
pctTrialsCompletedForBreak = np.array([.5,.75])
breakTrials = np.round(trials.nTotal*pctTrialsCompletedForBreak)
timeForTrialsRemainingMsg = np.any(trialNum==breakTrials)
if timeForTrialsRemainingMsg :
pctDone = round( (1.0*trialNum) / (1.0*trials.nTotal)*100, 0 )
NextRemindPctDoneText.setText( str(pctDone) + '% complete' )
NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
for i in range(5):
myWin.flip(clearBuffer=True)
NextRemindPctDoneText.draw()
NextRemindCountText.draw()
waitingForKeypress = False
if waitForKeyPressBetweenTrials or timeForTrialsRemainingMsg:
waitingForKeypress=True
NextText.setText('Press "SPACE" to continue')
NextText.draw()
NextRemindCountText.draw()
#NextRemindText.draw()
myWin.flip(clearBuffer=True)
else: core.wait(0.15)
while waitingForKeypress:
if autopilot:
waitingForKeypress=False
elif expStop == True:
waitingForKeypress=False
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space']:
waitingForKeypress=False
if key in ['escape','q']:
expStop = True
waitingForKeypress=False
myWin.clearBuffer()
thisTrial = trials.next()
core.wait(.1); time.sleep(.1)
#end trials loop ###########################################################
if expStop == True:
msg = 'user aborted experiment on keypress with trials trialNum=' + str(trialNum)
logging.info(msg); print(msg)
else:
print("Experiment finished")
if trialNum >0:
fileNamePP = fileNameWithPath + '.txt'
dfFromPP = trials.saveAsWideText(fileNamePP)
print("Psychopy wideText has been saved as", fileNamePP)
fileNamePickle = fileNameWithPath #.psydat will automatically be appended
trials.saveAsPickle(fileNamePickle) #.psydat
print("Most Psychopy-ic method: trials trialHandler has been saved as", fileNamePickle+'.psydat', " and should include copy of code")
#see analysis/analyzeTest.py
df = dfFromPP[:trialNum] #delete trials for which don't have response etc. yet, as that will otherwise cause error when averaging, plotting
if trialNum < trials.nTotal: #When you abort early, correct and other columns are not numeric because have value of "-"
#converting to numeric
df = df.convert_objects(convert_numeric=True)
print('df.dtypes=', df.dtypes) #df.dtypes in my case are "objects". you can't take the mean
print('dfFromPP =', df)
if eyetracking and getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
tracker.closeConnectionToEyeTracker(eyeMoveFile)
logging.info('finishing at '+timeAndDateStr)
#print('%corr = ', round( correct*1.0/trialNum*100., 2) , '% of ',trialNum,' trials', end=' ')
print('%corr each speed: ', end=' ')
print(np.around( numRightWrongEachSpeed[:,1] / ( numRightWrongEachSpeed[:,0] + numRightWrongEachSpeed[:,1]), 2))
print('\t\t\t\tnum trials each speed =', numRightWrongEachSpeed[:,0] + numRightWrongEachSpeed[:,1])
logging.flush()
myWin.close()
if quitFinder:
applescript="\'tell application \"Finder\" to launch\'" #turn Finder back on
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#Fit and plot data
plotData = False
if trialNum >0 and plotData:
import plotHelpers
fig = plotHelpers.plotDataAndPsychometricCurve(df, dataFileName=None)
figName = 'pythonFig'
figFnameWithPath = os.path.join('analysis/figs/', figName + '.png')
import pylab
pylab.savefig( figFnameWithPath ) #, bbox_inches='tight')
print('The plot has been saved, as', figFnameWithPath)
pylab.show() #pauses until window manually closed. Have to save before calling this, because closing the window loses the figure
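# --- Hedged sketch (not part of the original experiment script): how the per-speed
# accuracy printed above can be computed from a counts array shaped like
# numRightWrongEachSpeed, where column 0 counts wrong and column 1 counts correct
# responses. The counts below are made up for illustration.
def _example_percent_correct_per_speed():
    import numpy as np
    counts = np.array([[3, 7], [5, 5]])  # two speeds: [num wrong, num correct]
    total = counts[:, 0] + counts[:, 1]
    return np.around(counts[:, 1] / total, 2)  # -> array([0.7, 0.5])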
| 58.270415
| 200
| 0.678299
|
3963ed2560c11b7702d30d1e4fadc15f6a33bdf3
| 10,428
|
py
|
Python
|
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import psyneulink as pnl
import pytest
import psyneulink.core.components.functions.learningfunctions
import psyneulink.core.components.functions.transferfunctions
class TestContrastiveHebbian:
def test_scheduled_contrastive_hebbian(self):
o = pnl.TransferMechanism()
m = pnl.ContrastiveHebbianMechanism(
input_size=2,
hidden_size=0,
target_size=2,
separated=False,
mode=pnl.SIMPLE_HEBBIAN,
integrator_mode=True,
enable_learning=False,
matrix=[[0,-1],[-1, 0]],
# auto=0,
# hetero=-1,
)
# set max passes to ensure failure if no convergence instead of infinite loop
m.max_passes = 1000
s = pnl.sys(m, o)
ms = pnl.Scheduler(system=s)
ms.add_condition(o, pnl.WhenFinished(m))
s.scheduler_processing = ms
# m.reinitialize_when=pnl.Never()
print('matrix:\n', m.afferents[1].matrix)
results = s.run(inputs=[2, 2], num_trials=4)
print(results)
np.testing.assert_allclose(results, [[np.array([2.])], [np.array([2.])], [np.array([2.])], [np.array([2.])]])
def test_using_Hebbian_learning_of_orthognal_inputs_without_integrator_mode(self):
'''Same as tests/mechanisms/test_recurrent_transfer_mechanism/test_learning_of_orthognal_inputs
Tests that ContrastiveHebbianMechanism behaves like RecurrentTransferMechanism with Hebbian LearningFunction
(allowing for epsilon differences due to the CONVERGENCE CRITERION).
'''
size=4
R = pnl.ContrastiveHebbianMechanism(
input_size=4,
hidden_size=0,
target_size=4,
mode=pnl.SIMPLE_HEBBIAN,
enable_learning=True,
function=psyneulink.core.components.functions.transferfunctions.Linear,
learning_function=psyneulink.core.components.functions.learningfunctions.Hebbian,
minus_phase_termination_criterion=.01,
plus_phase_termination_criterion=.01,
# auto=0,
hetero=np.full((size,size),0.0)
)
P=pnl.Process(pathway=[R])
S=pnl.System(processes=[P])
inputs_dict = {R:[1,0,1,0]}
S.run(num_trials=4,
inputs=inputs_dict)
# KDM 10/2/18: removing this test from here, as it's kind of unimportant to this specific test
# and the behavior of the scheduler's time can be a bit odd - should hopefully fix that in future
# and test in its own module
# assert S.scheduler_processing.get_clock(S).previous_time.pass_ == 6
np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
[1.20074767, 0.0, 1.20074767, 0.0])
np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [1.20074767, 0.0, 1.20074767, 0.0])
np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S), [1.20074767, 0.0, 1.20074767, 0.0])
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(S),
[
[0.0, 0.0, 0.2399363, 0.0 ],
[0.0, 0.0, 0.0, 0.0 ],
[0.2399363, 0.0, 0.0, 0.0 ],
[0.0, 0.0, 0.0, 0.0 ]
]
)
# Reset state so learning of new pattern is "uncontaminated" by activity from previous one
R.output_state.parameters.value.set([0, 0, 0, 0], S)
inputs_dict = {R:[0,1,0,1]}
S.run(num_trials=4,
inputs=inputs_dict)
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(S),
[
[0.0, 0.0, 0.2399363, 0.0 ],
[0.0, 0.0, 0.0, 0.2399363 ],
[0.2399363, 0.0, 0.0, 0.0 ],
[0.0, 0.2399363, 0.0, 0.0 ]
]
)
np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S), [0.0, 1.20074767, 0.0, 1.20074767])
np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [0.0, 1.20074767, 0.0, 1.20074767])
np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
def test_using_Hebbian_learning_of_orthognal_inputs_with_integrator_mode(self):
'''Same as tests/mechanisms/test_recurrent_transfer_mechanism/test_learning_of_orthognal_inputs
Tests that ContrastiveHebbianMechanism behaves like RecurrentTransferMechanism with Hebbian LearningFunction
(allowing for epsilon differences due to INTEGRATION and convergence criterion).
'''
size=4
R = pnl.ContrastiveHebbianMechanism(
input_size=4,
hidden_size=0,
target_size=4,
separated=False,
mode=pnl.SIMPLE_HEBBIAN,
enable_learning=True,
function=psyneulink.core.components.functions.transferfunctions.Linear,
integrator_mode=True,
integration_rate=0.2,
learning_function=psyneulink.core.components.functions.learningfunctions.Hebbian,
minus_phase_termination_criterion=.01,
plus_phase_termination_criterion=.01,
# auto=0,
hetero=np.full((size,size),0.0)
)
P=pnl.Process(pathway=[R])
S=pnl.System(processes=[P])
inputs_dict = {R:[1,0,1,0]}
S.run(num_trials=4,
inputs=inputs_dict)
# KDM 10/2/18: removing this test from here, as it's kind of unimportant to this specific test
# and the behavior of the scheduler's time can be a bit odd - should hopefully fix that in future
# and test in its own module
# assert S.scheduler_processing.get_clock(S).previous_time.pass_ == 19
np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
[1.14142296, 0.0, 1.14142296, 0.0])
np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [1.14142296, 0.0, 1.14142296, 0.0])
np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S),
[1.1414229612568625, 0.0, 1.1414229612568625, 0.0])
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(S),
[
[0.0, 0.0, 0.22035998, 0.0 ],
[0.0, 0.0, 0.0, 0.0 ],
[0.22035998, 0.0, 0.0, 0.0 ],
[0.0, 0.0, 0.0, 0.0 ]
]
)
# Reset state so learning of new pattern is "uncontaminated" by activity from previous one
R.output_state.parameters.value.set([0, 0, 0, 0], S)
inputs_dict = {R:[0,1,0,1]}
S.run(num_trials=4,
inputs=inputs_dict)
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(S),
[
[0.0, 0.0, 0.22035998, 0.0 ],
[0.0, 0.0, 0.0, 0.22035998],
[0.22035998, 0.0, 0.0, 0. ],
[0.0, 0.22035998, 0.0, 0. ]
]
)
np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S),
[0.0, 1.1414229612568625, 0.0, 1.1414229612568625])
np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
[ 0.0, 1.14142296, 0.0, 1.14142296])
np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [0.0, 1.14142296, 0.0, 1.14142296])
np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
def test_additional_output_states(self):
CHL1 = pnl.ContrastiveHebbianMechanism(
input_size=2, hidden_size=0, target_size=2,
additional_output_states=[pnl.PLUS_PHASE_OUTPUT, pnl.MINUS_PHASE_OUTPUT])
assert len(CHL1.output_states)==5
assert pnl.PLUS_PHASE_OUTPUT in CHL1.output_states.names
CHL2 = pnl.ContrastiveHebbianMechanism(
input_size=2, hidden_size=0, target_size=2,
additional_output_states=[pnl.PLUS_PHASE_OUTPUT, pnl.MINUS_PHASE_OUTPUT],
separated=False)
assert len(CHL2.output_states)==5
assert pnl.PLUS_PHASE_OUTPUT in CHL2.output_states.names
def test_configure_learning(self):
o = pnl.TransferMechanism()
m = pnl.ContrastiveHebbianMechanism(
input_size=2, hidden_size=0, target_size=2,
mode=pnl.SIMPLE_HEBBIAN,
separated=False,
matrix=[[0,-.5],[-.5,0]]
)
with pytest.warns(UserWarning) as record:
m.learning_enabled = True
correct_message_found = False
for warning in record:
if ("Learning cannot be enabled" in str(warning.message) and
"because it has no LearningMechanism" in str(warning.message)):
correct_message_found = True
break
assert correct_message_found
m.configure_learning()
m.reinitialize_when=pnl.Never()
s = pnl.sys(m,o)
ms = pnl.Scheduler(system=s)
ms.add_condition(o, pnl.WhenFinished(m))
s.scheduler_processing=ms
results = s.run(inputs=[2,2], num_trials=4)
np.testing.assert_allclose(results, [[[2.671875]],
[[2.84093837]],
[[3.0510183]],
[[3.35234623]]])
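# Hedged reference (not part of the test suite above): a minimal NumPy sketch of the plain
# Hebbian outer-product update that these tests exercise, assuming a learning rate `lr`
# and a zeroed diagonal (matching the hetero-only matrices used above). PsyNeuLink's
# actual Hebbian LearningFunction, integration, and convergence handling are more involved.
def _example_hebbian_update(weights, activity, lr=0.05):
    import numpy as np
    activity = np.asarray(activity, dtype=float)
    delta = lr * np.outer(activity, activity)
    np.fill_diagonal(delta, 0.0)  # no self-connections, as in the tests above
    return np.asarray(weights, dtype=float) + delta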
| 46.972973
| 143
| 0.573552
|
c2362a797949fb6e78b61e09010021aadf236b54
| 16,279
|
py
|
Python
|
setup.py
|
shareablee/cassandra-python-driver
|
0bc84227097d1304aaa8c22c9d5c68d83eb444a0
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
shareablee/cassandra-python-driver
|
0bc84227097d1304aaa8c22c9d5c68d83eb444a0
|
[
"Apache-2.0"
] | 1
|
2020-01-17T13:48:18.000Z
|
2020-01-17T13:48:18.000Z
|
setup.py
|
shareablee/cassandra-python-driver
|
0bc84227097d1304aaa8c22c9d5c68d83eb444a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import warnings
if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests":
print("Running gevent tests")
from gevent.monkey import patch_all
patch_all()
if __name__ == '__main__' and sys.argv[1] == "eventlet_nosetests":
print("Running eventlet tests")
from eventlet import monkey_patch
monkey_patch()
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
from distutils.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import (CCompilerError, DistutilsPlatformError,
DistutilsExecError)
from distutils.cmd import Command
PY3 = sys.version_info[0] == 3
try:
import subprocess
has_subprocess = True
except ImportError:
has_subprocess = False
from cassandra import __version__
long_description = ""
with open("README.rst") as f:
long_description = f.read()
try:
from nose.commands import nosetests
except ImportError:
gevent_nosetests = None
eventlet_nosetests = None
else:
class gevent_nosetests(nosetests):
description = "run nosetests with gevent monkey patching"
class eventlet_nosetests(nosetests):
description = "run nosetests with eventlet monkey patching"
has_cqlengine = False
if __name__ == '__main__' and sys.argv[1] == "install":
try:
import cqlengine
has_cqlengine = True
except ImportError:
pass
PROFILING = False
class DocCommand(Command):
description = "generate or test documentation"
user_options = [("test", "t",
"run doctests instead of generating documentation")]
boolean_options = ["test"]
def initialize_options(self):
self.test = False
def finalize_options(self):
pass
def run(self):
if self.test:
path = "docs/_build/doctest"
mode = "doctest"
else:
path = "docs/_build/%s" % __version__
mode = "html"
try:
os.makedirs(path)
except:
pass
if has_subprocess:
# Prevent run with in-place extensions because cython-generated objects do not carry docstrings
# http://docs.cython.org/src/userguide/special_methods.html#docstrings
import glob
for f in glob.glob("cassandra/*.so"):
print("Removing '%s' to allow docs to run on pure python modules." %(f,))
os.unlink(f)
# Build io extension to make import and docstrings work
try:
output = subprocess.check_output(
["python", "setup.py", "build_ext", "--inplace", "--force", "--no-murmur3", "--no-cython"],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError("Documentation step '%s' failed: %s: %s" % ("build_ext", exc, exc.output))
else:
print(output)
try:
output = subprocess.check_output(
["sphinx-build", "-b", mode, "docs", path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError("Documentation step '%s' failed: %s: %s" % (mode, exc, exc.output))
else:
print(output)
print("")
print("Documentation step '%s' performed, results here:" % mode)
print(" file://%s/%s/index.html" % (os.path.dirname(os.path.realpath(__file__)), path))
class BuildFailed(Exception):
def __init__(self, ext):
self.ext = ext
murmur3_ext = Extension('cassandra.cmurmur3',
sources=['cassandra/cmurmur3.c'])
libev_ext = Extension('cassandra.io.libevwrapper',
sources=['cassandra/io/libevwrapper.c'],
include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include'],
libraries=['ev'],
library_dirs=['/usr/local/lib', '/opt/local/lib'])
platform_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on this platform.
===============================================================================
"""
arch_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on big-endian systems.
===============================================================================
"""
pypy_unsupported_msg = \
"""
=================================================================================
Some optional C extensions are not supported in PyPy. Only murmur3 will be built.
=================================================================================
"""
is_windows = os.name == 'nt'
is_pypy = "PyPy" in sys.version
if is_pypy:
sys.stderr.write(pypy_unsupported_msg)
is_supported_platform = sys.platform != "cli" and not sys.platform.startswith("java")
is_supported_arch = sys.byteorder != "big"
if not is_supported_platform:
sys.stderr.write(platform_unsupported_msg)
elif not is_supported_arch:
sys.stderr.write(arch_unsupported_msg)
try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch and not os.environ.get('CASS_DRIVER_NO_EXTENSIONS')
try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv
try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows
try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON')
try_cython &= 'egg_info' not in sys.argv # bypass setup_requires for pip egg_info calls, which will never have --install-option "--no-cython" coming from pip
sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")]
build_concurrency = int(os.environ.get('CASS_DRIVER_BUILD_CONCURRENCY', '0'))
class NoPatchExtension(Extension):
# Older versions of setuptools.extension have a static flag which is set to False before our
# setup_requires lands Cython. It causes our *.pyx sources to be renamed to *.c in
# the initializer.
# The other workaround would be to manually generate sources, but that bypasses a lot
# of the niceness cythonize embodies (setup build dir, conditional build, etc).
# Newer setuptools does not have this problem because it checks for cython dynamically.
# https://bitbucket.org/pypa/setuptools/commits/714c3144e08fd01a9f61d1c88411e76d2538b2e4
def __init__(self, *args, **kwargs):
# bypass the patched init if possible
if Extension.__bases__:
base, = Extension.__bases__
base.__init__(self, *args, **kwargs)
else:
Extension.__init__(self, *args, **kwargs)
class build_extensions(build_ext):
error_message = """
===============================================================================
WARNING: could not compile %s.
The C extensions are not required for the driver to run, but they add support
for token-aware routing with the Murmur3Partitioner.
On Windows, make sure Visual Studio or an SDK is installed, and your environment
is configured to build for the appropriate architecture (matching your Python runtime).
This is often a matter of using vcvarsall.bat from your install directory, or running
from a command prompt in the Visual Studio Tools Start Menu.
===============================================================================
""" if is_windows else """
===============================================================================
WARNING: could not compile %s.
The C extensions are not required for the driver to run, but they add support
for libev and token-aware routing with the Murmur3Partitioner.
Linux users should ensure that GCC and the Python headers are available.
On Ubuntu and Debian, this can be accomplished by running:
$ sudo apt-get install build-essential python-dev
On RedHat and RedHat-based systems like CentOS and Fedora:
$ sudo yum install gcc python-devel
On OSX, homebrew installations of Python should provide the necessary headers.
libev Support
-------------
For libev support, you will also need to install libev and its headers.
On Debian/Ubuntu:
$ sudo apt-get install libev4 libev-dev
On RHEL/CentOS/Fedora:
$ sudo yum install libev libev-devel
On OSX, via homebrew:
$ brew install libev
===============================================================================
"""
def run(self):
try:
self._setup_extensions()
build_ext.run(self)
except DistutilsPlatformError as exc:
sys.stderr.write('%s\n' % str(exc))
warnings.warn(self.error_message % "C extensions.")
def build_extensions(self):
if build_concurrency > 1:
self.check_extensions_list(self.extensions)
import multiprocessing.pool
multiprocessing.pool.ThreadPool(processes=build_concurrency).map(self.build_extension, self.extensions)
else:
build_ext.build_extensions(self)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError,
DistutilsPlatformError, IOError) as exc:
sys.stderr.write('%s\n' % str(exc))
name = "The %s extension" % (ext.name,)
warnings.warn(self.error_message % (name,))
def _setup_extensions(self):
# We defer extension setup until this command to leverage 'setup_requires' pulling in Cython before we
# attempt to import anything
self.extensions = []
if try_murmur3:
self.extensions.append(murmur3_ext)
if try_libev:
self.extensions.append(libev_ext)
if try_cython:
try:
from Cython.Build import cythonize
cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'metadata',
'pool', 'protocol', 'query', 'util']
compile_args = [] if is_windows else ['-Wno-unused-function']
self.extensions.extend(cythonize(
[Extension('cassandra.%s' % m, ['cassandra/%s.py' % m],
extra_compile_args=compile_args)
for m in cython_candidates],
nthreads=build_concurrency,
exclude_failures=True))
self.extensions.extend(cythonize(NoPatchExtension("*", ["cassandra/*.pyx"], extra_compile_args=compile_args),
nthreads=build_concurrency))
except Exception:
sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n")
def pre_build_check():
"""
Try to verify build tools
"""
if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
return True
try:
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.dist import Distribution
# base build_ext just to emulate compiler option setup
be = build_ext(Distribution())
be.initialize_options()
be.finalize_options()
# First, make sure we have a Python include directory
have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs)
if not have_python_include:
sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,))
return False
compiler = new_compiler(compiler=be.compiler)
customize_compiler(compiler)
executables = []
if compiler.compiler_type in ('unix', 'cygwin'):
executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')]
elif compiler.compiler_type == 'nt':
executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]
if executables:
from distutils.spawn import find_executable
for exe in executables:
if not find_executable(exe):
sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type))
return False
except Exception as exc:
sys.stderr.write('%s\n' % str(exc))
sys.stderr.write("Failed pre-build check. Attempting anyway.\n")
# if we are unable to positively id the compiler type, or one of these assumptions fails,
# just proceed as we would have without the check
return True
def run_setup(extensions):
kw = {'cmdclass': {'doc': DocCommand}}
if gevent_nosetests is not None:
kw['cmdclass']['gevent_nosetests'] = gevent_nosetests
if eventlet_nosetests is not None:
kw['cmdclass']['eventlet_nosetests'] = eventlet_nosetests
kw['cmdclass']['build_ext'] = build_extensions
kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install
if try_cython:
# precheck compiler before adding to setup_requires
# we don't actually negate try_cython because:
# 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback
# 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools
if pre_build_check():
cython_dep = 'Cython>=0.20,!=0.25,<0.30'
kw['setup_requires'] = [cython_dep]
else:
sys.stderr.write("Bypassing Cython setup requirement\n")
dependencies = ['six >=1.6']
if not PY3:
dependencies.append('futures')
setup(
name='cassandra-driver',
version=__version__,
description='Python driver for Cassandra',
long_description=long_description,
url='http://github.com/datastax/python-driver',
author='Tyler Hobbs',
author_email='tyler@datastax.com',
packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'],
keywords='cassandra,cql,orm',
include_package_data=True,
install_requires=dependencies,
tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
**kw)
run_setup(None)
if has_cqlengine:
warnings.warn("\n#######\n'cqlengine' package is present on path: %s\n"
"cqlengine is now an integrated sub-package of this driver.\n"
"It is recommended to remove this package to reduce the chance for conflicting usage" % cqlengine.__file__)
| 37.251716
| 157
| 0.61564
|
34158cc64450537c0d44bcc3a04a9d533b807c59
| 961
|
py
|
Python
|
custom_components/ha_kia_hyundai/base_entity.py
|
jaywryan/ha_kia_hyundai
|
422790bb42f0a402e9f9eb03834fca73fc195b09
|
[
"MIT"
] | 9
|
2021-12-18T21:30:10.000Z
|
2022-03-03T21:44:49.000Z
|
custom_components/ha_kia_hyundai/base_entity.py
|
jaywryan/ha_kia_hyundai
|
422790bb42f0a402e9f9eb03834fca73fc195b09
|
[
"MIT"
] | 34
|
2021-11-30T19:17:46.000Z
|
2021-12-16T01:33:24.000Z
|
custom_components/ha_kia_hyundai/base_entity.py
|
jaywryan/ha_kia_hyundai
|
422790bb42f0a402e9f9eb03834fca73fc195b09
|
[
"MIT"
] | 1
|
2022-01-14T08:00:44.000Z
|
2022-01-14T08:00:44.000Z
|
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
)
from .const import DOMAIN
from .vehicle import Vehicle
_LOGGER = logging.getLogger(__name__)
class DeviceInfoMixin:
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self._vehicle.identifier)},
"name": self._vehicle.name,
"manufacturer": f"{self._vehicle.api_cloud.region} {self._vehicle.api_cloud.brand}",
"model": self._vehicle.model,
"via_device": (DOMAIN, self._vehicle.identifier),
}
class BaseEntity(CoordinatorEntity[Vehicle], DeviceInfoMixin, Entity):
def __init__(self, vehicle: Vehicle):
super().__init__(vehicle.coordinator)
self._vehicle: Vehicle = vehicle
async def async_update(self) -> None:
"""
disable generic update method ...
"""
pass
| 27.457143
| 96
| 0.662851
|
81f45b174445ff45e11ab8dc03c9521a1b8c1bdb
| 189,064
|
py
|
Python
|
dace/frontend/python/replacements.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | null | null | null |
dace/frontend/python/replacements.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | null | null | null |
dace/frontend/python/replacements.py
|
meshtag/dace
|
e6751ee6a4f6356b47b93065d43cefb3fd54ebaa
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import ast
import copy
from copy import deepcopy as dcpy
import itertools
import warnings
from functools import reduce
from numbers import Number, Integral
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
import dace
from dace.codegen.tools import type_inference
from dace.config import Config
from dace import data, dtypes, subsets, symbolic, sdfg as sd
from dace.frontend.common import op_repository as oprepo
from dace.frontend.python.common import DaceSyntaxError
import dace.frontend.python.memlet_parser as mem_parser
from dace.frontend.python import astutils
from dace.frontend.python.nested_call import NestedCall
from dace.memlet import Memlet
from dace.sdfg import nodes, SDFG, SDFGState
from dace.symbolic import pystr_to_symbolic, issymbolic
import numpy as np
import sympy as sp
Size = Union[int, dace.symbolic.symbol]
Shape = Sequence[Size]
def normalize_axes(axes: Tuple[int], max_dim: int) -> List[int]:
""" Normalize a list of axes by converting negative dimensions to positive.
:param axes: the list of axes, possibly containing negative ints.
:param max_dim: the total number of dimensions.
:return: a list of dimensions containing only positive ints.
"""
return [ax if ax >= 0 else max_dim + ax for ax in axes]
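# Hedged usage sketch (illustrative only, not part of the replacement registry): negative
# axes are wrapped the same way NumPy wraps them, e.g. axis -1 of a 3-D array becomes 2.
def _example_normalize_axes():
    assert normalize_axes((-1, 0), max_dim=3) == [2, 0]
    return normalize_axes((-2,), max_dim=4)  # -> [2]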
##############################################################################
# Python function replacements ###############################################
##############################################################################
@oprepo.replaces('dace.define_local')
@oprepo.replaces('dace.ndarray')
def _define_local_ex(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
shape: Shape,
dtype: dace.typeclass,
storage: dtypes.StorageType = dtypes.StorageType.Default,
lifetime: dtypes.AllocationLifetime = dtypes.AllocationLifetime.Scope):
""" Defines a local array in a DaCe program. """
if not isinstance(shape, (list, tuple)):
shape = [shape]
name, _ = sdfg.add_temp_transient(shape, dtype, storage=storage, lifetime=lifetime)
return name
@oprepo.replaces('numpy.ndarray')
def _define_local(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, shape: Shape, dtype: dace.typeclass):
""" Defines a local array in a DaCe program. """
return _define_local_ex(pv, sdfg, state, shape, dtype)
@oprepo.replaces('dace.define_local_scalar')
def _define_local_scalar(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
dtype: dace.typeclass,
storage: dtypes.StorageType = dtypes.StorageType.Default,
lifetime: dtypes.AllocationLifetime = dtypes.AllocationLifetime.Scope):
""" Defines a local scalar in a DaCe program. """
name = sdfg.temp_data_name()
_, desc = sdfg.add_scalar(name, dtype, transient=True, storage=storage, lifetime=lifetime)
pv.variables[name] = name
return name
@oprepo.replaces('dace.define_stream')
def _define_stream(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, dtype: dace.typeclass, buffer_size: Size = 1):
""" Defines a local stream array in a DaCe program. """
name = sdfg.temp_data_name()
sdfg.add_stream(name, dtype, buffer_size=buffer_size, transient=True)
return name
@oprepo.replaces('dace.define_streamarray')
@oprepo.replaces('dace.stream')
def _define_streamarray(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
shape: Shape,
dtype: dace.typeclass,
buffer_size: Size = 1):
""" Defines a local stream array in a DaCe program. """
name = sdfg.temp_data_name()
sdfg.add_stream(name, dtype, shape=shape, buffer_size=buffer_size, transient=True)
return name
@oprepo.replaces('numpy.array')
@oprepo.replaces('dace.array')
def _define_literal_ex(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
obj: Any,
dtype: dace.typeclass = None,
copy: bool = True,
order: str = 'K',
subok: bool = False,
ndmin: int = 0,
like: Any = None,
storage: Optional[dtypes.StorageType] = None,
lifetime: Optional[dtypes.AllocationLifetime] = None):
""" Defines a literal array in a DaCe program. """
if like is not None:
raise NotImplementedError('"like" argument unsupported for numpy.array')
name = sdfg.temp_data_name()
if dtype is not None and not isinstance(dtype, dtypes.typeclass):
dtype = dtypes.typeclass(dtype)
# From existing data descriptor
if isinstance(obj, str):
desc = dcpy(sdfg.arrays[obj])
if dtype is not None:
desc.dtype = dtype
else: # From literal / constant
if dtype is None:
arr = np.array(obj, copy=copy, order=order, subok=subok, ndmin=ndmin)
else:
npdtype = dtype.as_numpy_dtype()
arr = np.array(obj, npdtype, copy=copy, order=order, subok=subok, ndmin=ndmin)
desc = data.create_datadescriptor(arr)
# Set extra properties
desc.transient = True
if storage is not None:
desc.storage = storage
if lifetime is not None:
desc.lifetime = lifetime
sdfg.add_datadesc(name, desc)
# If using existing array, make copy. Otherwise, make constant
if isinstance(obj, str):
# Make copy
rnode = state.add_read(obj)
wnode = state.add_write(name)
state.add_nedge(rnode, wnode, dace.Memlet.from_array(name, desc))
else:
# Make constant
sdfg.add_constant(name, arr, desc)
return name
@oprepo.replaces('dace.reduce')
def _reduce(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
redfunction: Callable[[Any, Any], Any],
in_array: str,
out_array=None,
axis=None,
identity=None):
if out_array is None:
inarr = in_array
# Convert axes to tuple
if axis is not None and not isinstance(axis, (tuple, list)):
axis = (axis, )
if axis is not None:
axis = tuple(pystr_to_symbolic(a) for a in axis)
axis = tuple(normalize_axes(axis, len(sdfg.arrays[inarr].shape)))
input_subset = subsets.Range.from_array(sdfg.arrays[inarr])
input_memlet = Memlet.simple(inarr, input_subset)
output_shape = None
# check if we are reducing along all axes
if axis is not None and len(axis) == len(input_subset.size()):
reduce_all = all(x == y for x, y in zip(axis, range(len(input_subset.size()))))
else:
reduce_all = False
if axis is None or reduce_all:
output_shape = [1]
else:
output_subset = copy.deepcopy(input_subset)
output_subset.pop(axis)
output_shape = output_subset.size()
if (len(output_shape) == 1 and output_shape[0] == 1):
outarr = sdfg.temp_data_name()
outarr, arr = sdfg.add_scalar(outarr, sdfg.arrays[inarr].dtype, sdfg.arrays[inarr].storage, transient=True)
else:
outarr, arr = sdfg.add_temp_transient(output_shape, sdfg.arrays[inarr].dtype, sdfg.arrays[inarr].storage)
output_memlet = Memlet.from_array(outarr, arr)
else:
inarr = in_array
outarr = out_array
# Convert axes to tuple
if axis is not None and not isinstance(axis, (tuple, list)):
axis = (axis, )
if axis is not None:
axis = tuple(pystr_to_symbolic(a) for a in axis)
axis = tuple(normalize_axes(axis, len(sdfg.arrays[inarr].shape)))
# Compute memlets
input_subset = subsets.Range.from_array(sdfg.arrays[inarr])
input_memlet = Memlet.simple(inarr, input_subset)
output_subset = subsets.Range.from_array(sdfg.arrays[outarr])
output_memlet = Memlet.simple(outarr, output_subset)
# Create reduce subgraph
inpnode = state.add_read(inarr)
rednode = state.add_reduce(redfunction, axis, identity)
outnode = state.add_write(outarr)
state.add_nedge(inpnode, rednode, input_memlet)
state.add_nedge(rednode, outnode, output_memlet)
if out_array is None:
return outarr
else:
return []
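# Hedged NumPy reference (not the SDFG implementation above): the output-shape logic of
# _reduce mirrors plain NumPy reductions, e.g. summing a (3, 4, 5) array over axis 1
# yields shape (3, 5), and reducing over all axes yields a scalar (the [1]-shaped
# transient above).
def _example_reduce_shapes():
    import numpy as np
    a = np.ones((3, 4, 5))
    assert np.sum(a, axis=1).shape == (3, 5)
    assert np.sum(a).shape == ()
    return np.sum(a, axis=(0, 2)).shape  # -> (4,)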
@oprepo.replaces('numpy.eye')
def eye(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, N, M=None, k=0, dtype=dace.float64):
M = M or N
name, _ = sdfg.add_temp_transient([N, M], dtype)
state.add_mapped_tasklet('eye',
dict(i='0:%s' % N, j='0:%s' % M), {},
'val = 1 if i == (j - %s) else 0' % k,
dict(val=dace.Memlet.simple(name, 'i, j')),
external_edges=True)
return name
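# Hedged NumPy reference (illustrative only): the tasklet above writes 1 exactly where
# i == j - k, which is the band numpy.eye places on the k-th diagonal.
def _example_eye_offset():
    import numpy as np
    N, M, k = 3, 4, 1
    expected = np.array([[1.0 if i == j - k else 0.0 for j in range(M)] for i in range(N)])
    assert np.array_equal(np.eye(N, M, k=k), expected)
    return expected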
@oprepo.replaces('numpy.empty')
def _numpy_empty(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, shape: Shape, dtype: dace.typeclass):
""" Creates an unitialized array of the specificied shape and dtype. """
return _define_local(pv, sdfg, state, shape, dtype)
@oprepo.replaces('numpy.empty_like')
def _numpy_empty_like(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
prototype: str,
dtype: dace.typeclass = None,
shape: Shape = None):
""" Creates an unitialized array of the same shape and dtype as prototype.
The optional dtype and shape inputs allow overriding the corresponding
attributes of prototype.
"""
if prototype not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=prototype))
desc = sdfg.arrays[prototype]
dtype = dtype or desc.dtype
shape = shape or desc.shape
return _define_local(pv, sdfg, state, shape, dtype)
@oprepo.replaces('numpy.identity')
def _numpy_identity(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, n, dtype=dace.float64):
""" Generates the nxn identity matrix. """
return eye(pv, sdfg, state, n, dtype=dtype)
@oprepo.replaces('numpy.full')
def _numpy_full(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
shape: Shape,
fill_value: Union[sp.Expr, Number],
dtype: dace.typeclass = None):
""" Creates and array of the specified shape and initializes it with
the fill value.
"""
if isinstance(fill_value, (Number, np.bool_)):
vtype = dtypes.DTYPE_TO_TYPECLASS[type(fill_value)]
elif isinstance(fill_value, sp.Expr):
vtype = _sym_type(fill_value)
else:
raise mem_parser.DaceSyntaxError(pv, None, "Fill value {f} must be a number!".format(f=fill_value))
dtype = dtype or vtype
name, _ = sdfg.add_temp_transient(shape, dtype)
state.add_mapped_tasklet(
'_numpy_full_', {"__i{}".format(i): "0: {}".format(s)
for i, s in enumerate(shape)}, {},
"__out = {}".format(fill_value),
dict(__out=dace.Memlet.simple(name, ",".join(["__i{}".format(i) for i in range(len(shape))]))),
external_edges=True)
return name
@oprepo.replaces('numpy.full_like')
def _numpy_full_like(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
a: str,
fill_value: Number,
dtype: dace.typeclass = None,
shape: Shape = None):
""" Creates and array of the same shape and dtype as a and initializes it
with the fill value.
"""
if a not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=a))
desc = sdfg.arrays[a]
dtype = dtype or desc.dtype
shape = shape or desc.shape
return _numpy_full(pv, sdfg, state, shape, fill_value, dtype)
@oprepo.replaces('numpy.ones')
def _numpy_ones(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, shape: Shape, dtype: dace.typeclass = dace.float64):
""" Creates and array of the specified shape and initializes it with ones.
"""
return _numpy_full(pv, sdfg, state, shape, 1.0, dtype)
@oprepo.replaces('numpy.ones_like')
def _numpy_ones_like(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
a: str,
dtype: dace.typeclass = None,
shape: Shape = None):
""" Creates and array of the same shape and dtype as a and initializes it
with ones.
"""
return _numpy_full_like(pv, sdfg, state, a, 1.0, dtype, shape)
@oprepo.replaces('numpy.zeros')
def _numpy_zeros(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
shape: Shape,
dtype: dace.typeclass = dace.float64):
""" Creates and array of the specified shape and initializes it with zeros.
"""
return _numpy_full(pv, sdfg, state, shape, 0.0, dtype)
@oprepo.replaces('numpy.zeros_like')
def _numpy_zeros_like(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
a: str,
dtype: dace.typeclass = None,
shape: Shape = None):
""" Creates and array of the same shape and dtype as a and initializes it
with zeros.
"""
return _numpy_full_like(pv, sdfg, state, a, 0.0, dtype, shape)
@oprepo.replaces('numpy.copy')
def _numpy_copy(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str):
""" Creates a copy of array a.
"""
if a not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=a))
# TODO: The whole AddTransientMethod class should be moved into replacements.py
from dace.frontend.python.newast import _add_transient_data
name, desc = _add_transient_data(sdfg, sdfg.arrays[a])
rnode = state.add_read(a)
wnode = state.add_write(name)
state.add_nedge(rnode, wnode, dace.Memlet.from_array(name, desc))
return name
@oprepo.replaces('numpy.flip')
def _numpy_flip(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, axis=None):
""" Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
"""
if arr not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=arr))
desc = sdfg.arrays[arr]
if isinstance(desc, data.Stream):
raise mem_parser.DaceSyntaxError(pv, None, "Streams are not supported!")
if isinstance(desc, data.Scalar):
return arr
ndim = len(desc.shape)
if axis is None:
axis = [True] * ndim
else:
if not isinstance(axis, (list, tuple)):
axis = [axis]
axis = [a if a >= 0 else a + ndim for a in axis]
axis = [True if i in axis else False for i in range(ndim)]
# TODO: The following code assumes that code generation resolves an inverted copy.
# sset = ','.join([f'{s}-1:-1:-1' if a else f'0:{s}:1'
# for a, s in zip(axis, desc.shape)])
# dset = ','.join([f'0:{s}:1' for s in desc.shape])
# view = _ndarray_reshape(pv, sdfg, state, arr, desc.shape)
# acpy, _ = sdfg.add_temp_transient(desc.shape, desc.dtype, desc.storage)
# vnode = state.add_read(view)
# anode = state.add_read(acpy)
# state.add_edge(vnode, None, anode, None, Memlet(f'{view}[{sset}] -> {dset}'))
arr_copy, _ = sdfg.add_temp_transient_like(desc)
inpidx = ','.join([f'__i{i}' for i in range(ndim)])
outidx = ','.join([f'{s} - __i{i} - 1' if a else f'__i{i}' for i, (a, s) in enumerate(zip(axis, desc.shape))])
state.add_mapped_tasklet(name="_numpy_flip_",
map_ranges={f'__i{i}': f'0:{s}:1'
for i, s in enumerate(desc.shape)},
inputs={'__inp': Memlet(f'{arr}[{inpidx}]')},
code='__out = __inp',
outputs={'__out': Memlet(f'{arr_copy}[{outidx}]')},
external_edges=True)
return arr_copy
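# Hedged NumPy reference (not the SDFG code path): the output index `s - __i - 1` used on
# flipped axes above reproduces numpy.flip, e.g. flipping axis 1 of a (2, 3) array.
def _example_flip_indexing():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    manual = a[:, ::-1]  # index j maps to 3 - j - 1
    assert np.array_equal(np.flip(a, axis=1), manual)
    return manual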
@oprepo.replaces('numpy.rot90')
def _numpy_rot90(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, k=1, axes=(0, 1)):
""" Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
"""
if arr not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=arr))
desc = sdfg.arrays[arr]
if not isinstance(desc, (data.Array, data.View)):
raise mem_parser.DaceSyntaxError(pv, None, "Only Arrays and Views supported!")
ndim = len(desc.shape)
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
if axes[0] == axes[1] or abs(axes[0] - axes[1]) == ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= ndim or axes[0] < -ndim or axes[1] >= ndim or axes[1] < -ndim):
raise ValueError("Axes={} out of range for array of ndim={}.".format(axes, ndim))
k %= 4
to_flip = []
transpose = False
axes_list = list(range(ndim))
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])
inpidx = ','.join([f'__i{i}' for i in range(ndim)])
if k == 0:
return arr
if k == 2:
to_flip = [axes[0], axes[1]]
elif k == 1:
to_flip = [axes[1]]
transpose = True
else: # k == 3
to_flip = [axes[0]]
transpose = True
arr_copy, narr = sdfg.add_temp_transient_like(desc)
shape_list = list(narr.shape)
if transpose:
shape_list[axes[0]], shape_list[axes[1]] = shape_list[axes[1]], shape_list[axes[0]]
# Make C-contiguous array shape
narr.shape = shape_list
narr.strides = [data._prod(shape_list[i + 1:]) for i in range(len(shape_list))]
narr.total_size = sum(((shp - 1) * s for shp, s in zip(narr.shape, narr.strides))) + 1
narr.alignment_offset = 0
out_indices = [f'{s} - __i{i} - 1' if i in to_flip else f'__i{i}' for i, s in enumerate(desc.shape)]
if transpose:
out_indices[axes[0]], out_indices[axes[1]] = out_indices[axes[1]], out_indices[axes[0]]
outidx = ','.join(out_indices)
state.add_mapped_tasklet(name="_rot90_",
map_ranges={f'__i{i}': f'0:{s}:1'
for i, s in enumerate(desc.shape)},
inputs={'__inp': Memlet(f'{arr}[{inpidx}]')},
code='__out = __inp',
outputs={'__out': Memlet(f'{arr_copy}[{outidx}]')},
external_edges=True)
return arr_copy
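# Hedged NumPy reference (illustrative only): for k == 1 the code above flips axes[1] and
# then swaps the two plane axes, matching numpy.rot90 in the default (0, 1) plane.
def _example_rot90_decomposition():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    assert np.array_equal(np.rot90(a, 1), np.flip(a, axis=1).swapaxes(0, 1))
    return np.rot90(a, 1)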
@oprepo.replaces('elementwise')
@oprepo.replaces('dace.elementwise')
def _elementwise(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, func: str, in_array: str, out_array=None):
"""Apply a lambda function to each element in the input"""
inparr = sdfg.arrays[in_array]
restype = sdfg.arrays[in_array].dtype
if out_array is None:
out_array, outarr = sdfg.add_temp_transient(inparr.shape, restype, inparr.storage)
else:
outarr = sdfg.arrays[out_array]
func_ast = ast.parse(func)
try:
lambda_ast = func_ast.body[0].value
if len(lambda_ast.args.args) != 1:
raise SyntaxError("Expected lambda with one arg, but {} has {}".format(func, len(lambda_ast.args.arrgs)))
arg = lambda_ast.args.args[0].arg
replaced_ast = astutils.ASTFindReplace({arg: '__inp'}).visit(lambda_ast.body)
body = astutils.unparse(replaced_ast)
except AttributeError:
raise SyntaxError("Could not parse func {}".format(func))
code = "__out = {}".format(body)
num_elements = reduce(lambda x, y: x * y, inparr.shape)
if num_elements == 1:
inp = state.add_read(in_array)
out = state.add_write(out_array)
tasklet = state.add_tasklet("_elementwise_", {'__inp'}, {'__out'}, code)
state.add_edge(inp, None, tasklet, '__inp', Memlet.from_array(in_array, inparr))
state.add_edge(tasklet, '__out', out, None, Memlet.from_array(out_array, outarr))
else:
state.add_mapped_tasklet(
name="_elementwise_",
map_ranges={'__i%d' % i: '0:%s' % n
for i, n in enumerate(inparr.shape)},
inputs={'__inp': Memlet.simple(in_array, ','.join(['__i%d' % i for i in range(len(inparr.shape))]))},
code=code,
outputs={'__out': Memlet.simple(out_array, ','.join(['__i%d' % i for i in range(len(inparr.shape))]))},
external_edges=True)
return out_array
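# Hedged sketch (standard library only; ast.unparse needs Python 3.9+): roughly how
# _elementwise above turns a lambda string into tasklet code, with ast.NodeTransformer
# standing in for dace's astutils.ASTFindReplace. The helper name is illustrative.
def _example_lambda_to_tasklet(func: str = "lambda x: x * 2 + 1") -> str:
    import ast
    lambda_ast = ast.parse(func).body[0].value
    arg = lambda_ast.args.args[0].arg
    class _Rename(ast.NodeTransformer):
        def visit_Name(self, node):
            if node.id == arg:
                return ast.copy_location(ast.Name(id='__inp', ctx=node.ctx), node)
            return node
    body = ast.fix_missing_locations(_Rename().visit(lambda_ast.body))
    return "__out = {}".format(ast.unparse(body))  # -> '__out = __inp * 2 + 1'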
def _simple_call(sdfg: SDFG, state: SDFGState, inpname: str, func: str, restype: dace.typeclass = None):
""" Implements a simple call of the form `out = func(inp)`. """
if isinstance(inpname, (list, tuple)): # TODO investigate this
inpname = inpname[0]
if not isinstance(inpname, str):
# Constant parameter
cst = inpname
inparr = data.create_datadescriptor(cst)
inpname = sdfg.temp_data_name()
inparr.transient = True
sdfg.add_constant(inpname, cst, inparr)
sdfg.add_datadesc(inpname, inparr)
else:
inparr = sdfg.arrays[inpname]
if restype is None:
restype = inparr.dtype
outname, outarr = sdfg.add_temp_transient_like(inparr)
outarr.dtype = restype
num_elements = data._prod(inparr.shape)
if num_elements == 1:
inp = state.add_read(inpname)
out = state.add_write(outname)
tasklet = state.add_tasklet(func, {'__inp'}, {'__out'}, '__out = {f}(__inp)'.format(f=func))
state.add_edge(inp, None, tasklet, '__inp', Memlet.from_array(inpname, inparr))
state.add_edge(tasklet, '__out', out, None, Memlet.from_array(outname, outarr))
else:
state.add_mapped_tasklet(
name=func,
map_ranges={'__i%d' % i: '0:%s' % n
for i, n in enumerate(inparr.shape)},
inputs={'__inp': Memlet.simple(inpname, ','.join(['__i%d' % i for i in range(len(inparr.shape))]))},
code='__out = {f}(__inp)'.format(f=func),
outputs={'__out': Memlet.simple(outname, ','.join(['__i%d' % i for i in range(len(inparr.shape))]))},
external_edges=True)
return outname
def _complex_to_scalar(complex_type: dace.typeclass):
if complex_type is dace.complex64:
return dace.float32
elif complex_type is dace.complex128:
return dace.float64
else:
return complex_type
@oprepo.replaces('exp')
@oprepo.replaces('dace.exp')
@oprepo.replaces('numpy.exp')
@oprepo.replaces('math.exp')
def _exp(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'exp')
@oprepo.replaces('sin')
@oprepo.replaces('dace.sin')
@oprepo.replaces('numpy.sin')
@oprepo.replaces('math.sin')
def _sin(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'sin')
@oprepo.replaces('cos')
@oprepo.replaces('dace.cos')
@oprepo.replaces('numpy.cos')
@oprepo.replaces('math.cos')
def _cos(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'cos')
@oprepo.replaces('sqrt')
@oprepo.replaces('dace.sqrt')
@oprepo.replaces('numpy.sqrt')
@oprepo.replaces('math.sqrt')
def _sqrt(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'sqrt')
@oprepo.replaces('log')
@oprepo.replaces('dace.log')
@oprepo.replaces('numpy.log')
@oprepo.replaces('math.log')
def _log(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'log')
@oprepo.replaces('math.floor')
def _floor(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'floor', restype=dtypes.typeclass(int))
@oprepo.replaces('math.ceil')
def _ceil(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'ceil', restype=dtypes.typeclass(int))
@oprepo.replaces('conj')
@oprepo.replaces('dace.conj')
@oprepo.replaces('numpy.conj')
def _conj(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
return _simple_call(sdfg, state, input, 'conj')
@oprepo.replaces('real')
@oprepo.replaces('dace.real')
@oprepo.replaces('numpy.real')
def _real(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
inptype = sdfg.arrays[input].dtype
return _simple_call(sdfg, state, input, 'real', _complex_to_scalar(inptype))
@oprepo.replaces('imag')
@oprepo.replaces('dace.imag')
@oprepo.replaces('numpy.imag')
def _imag(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: str):
inptype = sdfg.arrays[input].dtype
return _simple_call(sdfg, state, input, 'imag', _complex_to_scalar(inptype))
@oprepo.replaces('abs')
def _abs(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, input: Union[str, Number, symbolic.symbol]):
return _simple_call(sdfg, state, input, 'abs')
@oprepo.replaces('transpose')
@oprepo.replaces('dace.transpose')
@oprepo.replaces('numpy.transpose')
def _transpose(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, inpname: str, axes=None):
if axes is None:
arr1 = sdfg.arrays[inpname]
restype = arr1.dtype
outname, arr2 = sdfg.add_temp_transient((arr1.shape[1], arr1.shape[0]), restype, arr1.storage)
acc1 = state.add_read(inpname)
acc2 = state.add_write(outname)
import dace.libraries.blas # Avoid import loop
tasklet = dace.libraries.blas.Transpose('_Transpose_', restype)
state.add_node(tasklet)
state.add_edge(acc1, None, tasklet, '_inp', dace.Memlet.from_array(inpname, arr1))
state.add_edge(tasklet, '_out', acc2, None, dace.Memlet.from_array(outname, arr2))
else:
arr1 = sdfg.arrays[inpname]
if len(axes) != len(arr1.shape) or sorted(axes) != list(range(len(arr1.shape))):
raise ValueError("axes don't match array")
new_shape = [arr1.shape[i] for i in axes]
outname, arr2 = sdfg.add_temp_transient(new_shape, arr1.dtype, arr1.storage)
state.add_mapped_tasklet(
"_transpose_", {"_i{}".format(i): "0:{}".format(s)
for i, s in enumerate(arr1.shape)},
dict(_in=Memlet.simple(inpname, ", ".join("_i{}".format(i) for i, _ in enumerate(arr1.shape)))),
"_out = _in",
dict(_out=Memlet.simple(outname, ", ".join("_i{}".format(axes[i]) for i, _ in enumerate(arr1.shape)))),
external_edges=True)
return outname
@oprepo.replaces('numpy.sum')
def _sum(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis=None):
return _reduce(pv, sdfg, state, "lambda x, y: x + y", a, axis=axis, identity=0)
@oprepo.replaces('numpy.mean')
def _mean(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis=None):
nest = NestedCall(pv, sdfg, state)
sum = nest(_sum)(a, axis=axis)
if axis is None:
div_amount = reduce(lambda x, y: x * y, (d for d in sdfg.arrays[a].shape))
elif isinstance(axis, (tuple, list)):
axis = normalize_axes(axis, len(sdfg.arrays[a].shape))
# each entry needs to be divided by the size of the reduction
div_amount = reduce(lambda x, y: x * y, (d for i, d in enumerate(sdfg.arrays[a].shape) if i in axis))
else:
div_amount = sdfg.arrays[a].shape[axis]
return nest, nest(_elementwise)("lambda x: x / ({})".format(div_amount), sum)
@oprepo.replaces('numpy.max')
@oprepo.replaces('numpy.amax')
def _max(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis=None):
return _reduce(pv,
sdfg,
state,
"lambda x, y: max(x, y)",
a,
axis=axis,
identity=dtypes.min_value(sdfg.arrays[a].dtype))
@oprepo.replaces('numpy.min')
@oprepo.replaces('numpy.amin')
def _min(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis=None):
return _reduce(pv,
sdfg,
state,
"lambda x, y: min(x, y)",
a,
axis=axis,
identity=dtypes.max_value(sdfg.arrays[a].dtype))
def _minmax2(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, b: str, ismin=True):
""" Implements the min or max function with 2 scalar arguments. """
in_conn = set()
out_conn = {'__out'}
if isinstance(a, str) and a in sdfg.arrays.keys():
desc_a = sdfg.arrays[a]
read_a = state.add_read(a)
conn_a = '__in_a'
in_conn.add(conn_a)
else:
desc_a = a
read_a = None
conn_a = symbolic.symstr(a)
if isinstance(b, str) and b in sdfg.arrays.keys():
desc_b = sdfg.arrays[b]
read_b = state.add_read(b)
conn_b = '__in_b'
in_conn.add(conn_b)
else:
desc_b = b
read_b = None
conn_b = symbolic.symstr(b)
dtype_c, [cast_a, cast_b] = _result_type([desc_a, desc_b])
arg_a, arg_b = "{in1}".format(in1=conn_a), "{in2}".format(in2=conn_b)
if cast_a:
arg_a = "{ca}({in1})".format(ca=str(cast_a).replace('::', '.'), in1=conn_a)
if cast_b:
arg_b = "{cb}({in2})".format(cb=str(cast_b).replace('::', '.'), in2=conn_b)
func = 'min' if ismin else 'max'
tasklet = nodes.Tasklet(f'__{func}2', in_conn, out_conn, f'__out = {func}({arg_a}, {arg_b})')
c = _define_local_scalar(pv, sdfg, state, dtype_c)
desc_c = sdfg.arrays[c]
write_c = state.add_write(c)
if read_a:
state.add_edge(read_a, None, tasklet, '__in_a', Memlet.from_array(a, desc_a))
if read_b:
state.add_edge(read_b, None, tasklet, '__in_b', Memlet.from_array(b, desc_b))
state.add_edge(tasklet, '__out', write_c, None, Memlet.from_array(c, desc_c))
return c
# NOTE: We support only the version of Python max that takes scalar arguments.
# For iterable arguments one must use the equivalent NumPy methods.
@oprepo.replaces('max')
def _pymax(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: Union[str, Number, symbolic.symbol], *args):
left_arg = a
current_state = state
for i, b in enumerate(args):
if i > 0:
pv._add_state('__max2_%d' % i)
pv.last_state.set_default_lineinfo(pv.current_lineinfo)
current_state = pv.last_state
left_arg = _minmax2(pv, sdfg, current_state, left_arg, b, ismin=False)
return left_arg
# NOTE: We support only the version of Python min that takes scalar arguments.
# For iterable arguments one must use the equivalent NumPy methods.
@oprepo.replaces('min')
def _pymin(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: Union[str, Number, symbolic.symbol], *args):
left_arg = a
current_state = state
for i, b in enumerate(args):
if i > 0:
pv._add_state('__min2_%d' % i)
pv.last_state.set_default_lineinfo(pv.current_lineinfo)
current_state = pv.last_state
left_arg = _minmax2(pv, sdfg, current_state, left_arg, b)
return left_arg
@oprepo.replaces('slice')
def _slice(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, *args, **kwargs):
return (slice(*args, **kwargs), )
@oprepo.replaces('numpy.argmax')
def _argmax(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis, result_type=dace.int32):
return _argminmax(pv, sdfg, state, a, axis, func="max", result_type=result_type)
@oprepo.replaces('numpy.argmin')
def _argmin(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a: str, axis, result_type=dace.int32):
return _argminmax(pv, sdfg, state, a, axis, func="min", result_type=result_type)
def _argminmax(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
a: str,
axis,
func,
result_type=dace.int32,
return_both=False):
nest = NestedCall(pv, sdfg, state)
assert func in ['min', 'max']
if axis is None or not isinstance(axis, Integral):
raise SyntaxError('Axis must be an int')
a_arr = sdfg.arrays[a]
if not 0 <= axis < len(a_arr.shape):
raise SyntaxError("Expected 0 <= axis < len({}.shape), got {}".format(a, axis))
reduced_shape = list(copy.deepcopy(a_arr.shape))
reduced_shape.pop(axis)
val_and_idx = dace.struct('_val_and_idx', val=a_arr.dtype, idx=result_type)
# HACK: since identity cannot be specified for structs, we have to init the output array
reduced_structs, reduced_struct_arr = sdfg.add_temp_transient(reduced_shape, val_and_idx)
code = "__init = _val_and_idx(val={}, idx=-1)".format(
dtypes.min_value(a_arr.dtype) if func == 'max' else dtypes.max_value(a_arr.dtype))
nest.add_state().add_mapped_tasklet(
name="_arg{}_convert_".format(func),
map_ranges={'__i%d' % i: '0:%s' % n
for i, n in enumerate(a_arr.shape) if i != axis},
inputs={},
code=code,
outputs={
'__init': Memlet.simple(reduced_structs,
','.join('__i%d' % i for i in range(len(a_arr.shape)) if i != axis))
},
external_edges=True)
nest.add_state().add_mapped_tasklet(
name="_arg{}_reduce_".format(func),
map_ranges={'__i%d' % i: '0:%s' % n
for i, n in enumerate(a_arr.shape)},
inputs={'__in': Memlet.simple(a, ','.join('__i%d' % i for i in range(len(a_arr.shape))))},
code="__out = _val_and_idx(idx={}, val=__in)".format("__i%d" % axis),
outputs={
'__out':
Memlet.simple(reduced_structs,
','.join('__i%d' % i for i in range(len(a_arr.shape)) if i != axis),
wcr_str=("lambda x, y:"
"_val_and_idx(val={}(x.val, y.val), "
"idx=(y.idx if x.val {} y.val else x.idx))").format(
func, '<' if func == 'max' else '>'))
},
external_edges=True)
if return_both:
outidx, outidxarr = sdfg.add_temp_transient(sdfg.arrays[reduced_structs].shape, result_type)
outval, outvalarr = sdfg.add_temp_transient(sdfg.arrays[reduced_structs].shape, a_arr.dtype)
nest.add_state().add_mapped_tasklet(
name="_arg{}_extract_".format(func),
map_ranges={'__i%d' % i: '0:%s' % n
for i, n in enumerate(a_arr.shape) if i != axis},
inputs={
'__in': Memlet.simple(reduced_structs,
','.join('__i%d' % i for i in range(len(a_arr.shape)) if i != axis))
},
code="__out_val = __in.val\n__out_idx = __in.idx",
outputs={
'__out_val': Memlet.simple(outval, ','.join('__i%d' % i for i in range(len(a_arr.shape)) if i != axis)),
'__out_idx': Memlet.simple(outidx, ','.join('__i%d' % i for i in range(len(a_arr.shape)) if i != axis))
},
external_edges=True)
return nest, (outval, outidx)
else:
# map to result_type
out, outarr = sdfg.add_temp_transient(sdfg.arrays[reduced_structs].shape, result_type)
nest(_elementwise)("lambda x: x.idx", reduced_structs, out_array=out)
return nest, out
@oprepo.replaces('numpy.where')
def _array_array_where(visitor: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
cond_operand: str,
left_operand: str = None,
right_operand: str = None):
if left_operand is None or right_operand is None:
raise ValueError('numpy.where is only supported for the case where x and y are given')
cond_arr = sdfg.arrays[cond_operand]
left_arr = sdfg.arrays.get(left_operand, None)
right_arr = sdfg.arrays.get(right_operand, None)
left_type = left_arr.dtype if left_arr else dtypes.DTYPE_TO_TYPECLASS[type(left_operand)]
right_type = right_arr.dtype if right_arr else dtypes.DTYPE_TO_TYPECLASS[type(right_operand)]
# Implicit Python conversion implemented as casting
arguments = [cond_arr, left_arr or left_type, right_arr or right_type]
tasklet_args = ['__incond', '__in1' if left_arr else left_operand, '__in2' if right_arr else right_operand]
result_type, casting = _result_type(arguments[1:])
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[1] = f"{str(left_cast).replace('::', '.')}({tasklet_args[1]})"
if right_cast is not None:
tasklet_args[2] = f"{str(right_cast).replace('::', '.')}({tasklet_args[2]})"
left_shape = left_arr.shape if left_arr else [1]
right_shape = right_arr.shape if right_arr else [1]
cond_shape = cond_arr.shape if cond_arr else [1]
(out_shape, all_idx_dict, out_idx, left_idx, right_idx) = _broadcast_together(left_shape, right_shape)
# Broadcast condition with broadcasted left+right
_, _, _, cond_idx, _ = _broadcast_together(cond_shape, out_shape)
# Fix for Scalars
if isinstance(left_arr, data.Scalar):
left_idx = subsets.Range([(0, 0, 1)])
if isinstance(right_arr, data.Scalar):
right_idx = subsets.Range([(0, 0, 1)])
if isinstance(cond_arr, data.Scalar):
cond_idx = subsets.Range([(0, 0, 1)])
if left_arr is None and right_arr is None:
raise ValueError('Both x and y cannot be scalars in numpy.where')
storage = left_arr.storage if left_arr else right_arr.storage
out_operand, out_arr = sdfg.add_temp_transient(out_shape, result_type, storage)
if list(out_shape) == [1]:
tasklet = state.add_tasklet('_where_', {'__incond', '__in1', '__in2'}, {'__out'},
'__out = {i1} if __incond else {i2}'.format(i1=tasklet_args[1], i2=tasklet_args[2]))
n0 = state.add_read(cond_operand)
n3 = state.add_write(out_operand)
state.add_edge(n0, None, tasklet, '__incond', dace.Memlet.from_array(cond_operand, cond_arr))
if left_arr:
n1 = state.add_read(left_operand)
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_arr))
if right_arr:
n2 = state.add_read(right_operand)
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_arr))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_arr))
else:
inputs = {}
inputs['__incond'] = Memlet.simple(cond_operand, cond_idx)
if left_arr:
inputs['__in1'] = Memlet.simple(left_operand, left_idx)
if right_arr:
inputs['__in2'] = Memlet.simple(right_operand, right_idx)
state.add_mapped_tasklet("_where_",
all_idx_dict,
inputs,
'__out = {i1} if __incond else {i2}'.format(i1=tasklet_args[1], i2=tasklet_args[2]),
{'__out': Memlet.simple(out_operand, out_idx)},
external_edges=True)
return out_operand
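# Note on the replacement above: only the three-argument form numpy.where(cond, x, y)
# is supported here; the single-argument (nonzero-like) form is rejected with a
# ValueError. Conceptually, for broadcast inputs it emits an element-wise tasklet
# equivalent to `__out = x[i] if cond[i] else y[i]`.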
##############################################################################
# Python operation replacements ##############################################
##############################################################################
def _unop(sdfg: SDFG, state: SDFGState, op1: str, opcode: str, opname: str):
    """ Implements a general element-wise array unary operator. """
    arr1 = sdfg.arrays[op1]
    restype, cast = _result_type([arr1], opname)

    # NOTE: This is a fix for np.bool_, which is a true boolean.
    # In this case, the invert operator must become a not operator.
    if opcode == '~' and arr1.dtype == dace.bool_:
        opcode = 'not'

    tasklet_code = "__out = {} __in1".format(opcode)
    if cast:
        tasklet_code = tasklet_code.replace('__in1', "{}(__in1)".format(cast))

    name, _ = sdfg.add_temp_transient(arr1.shape, restype, arr1.storage)
    state.add_mapped_tasklet("_%s_" % opname, {'__i%d' % i: '0:%s' % s
                                               for i, s in enumerate(arr1.shape)},
                             {'__in1': Memlet.simple(op1, ','.join(['__i%d' % i for i in range(len(arr1.shape))]))},
                             tasklet_code,
                             {'__out': Memlet.simple(name, ','.join(['__i%d' % i for i in range(len(arr1.shape))]))},
                             external_edges=True)
    return name
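# Illustrative sketch (comment only, not executed): negating a uint8 array with
# the USub operator produces an int8 result per _result_type, and the generated
# map runs a tasklet of the form `__out = - __in1` over every element.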
def _broadcast_to(target_shape, operand_shape):
    # The difference from normal broadcasting is that the broadcast shape must
    # equal the target shape. NumPy does not document this variant explicitly,
    # so we follow the unidirectional-broadcasting description from ONNX.
results = _broadcast_together(target_shape, operand_shape, unidirectional=True)
# the output_shape should be equal to the target_shape
assert all(i == o for i, o in zip(target_shape, results[0]))
return results
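# Illustrative sketch (comment only): broadcasting a (4,) operand to a (3, 4)
# target succeeds and yields the target shape, whereas broadcasting a (3, 4)
# operand to a (4,) target raises, because unidirectional broadcasting must not
# change the target shape (regular NumPy broadcasting would instead give (3, 4)).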
def _broadcast_together(arr1_shape, arr2_shape, unidirectional=False):
all_idx_dict, all_idx, a1_idx, a2_idx = {}, [], [], []
max_i = max(len(arr1_shape), len(arr2_shape))
def get_idx(i):
return "__i" + str(max_i - i - 1)
for i, (dim1, dim2) in enumerate(itertools.zip_longest(reversed(arr1_shape), reversed(arr2_shape))):
all_idx.append(get_idx(i))
if dim1 == dim2:
a1_idx.append(get_idx(i))
a2_idx.append(get_idx(i))
all_idx_dict[get_idx(i)] = dim1
# if unidirectional, dim2 must also be 1
elif dim1 == 1 and dim2 is not None and not unidirectional:
a1_idx.append("0")
# dim2 != 1 must hold here
a2_idx.append(get_idx(i))
all_idx_dict[get_idx(i)] = dim2
elif dim2 == 1 and dim1 is not None:
# dim1 != 1 must hold here
a1_idx.append(get_idx(i))
a2_idx.append("0")
all_idx_dict[get_idx(i)] = dim1
# if unidirectional, this is not allowed
        elif dim1 is None and not unidirectional:
# dim2 != None must hold here
a2_idx.append(get_idx(i))
all_idx_dict[get_idx(i)] = dim2
        elif dim2 is None:
# dim1 != None must hold here
a1_idx.append(get_idx(i))
all_idx_dict[get_idx(i)] = dim1
else:
if unidirectional:
raise SyntaxError(f"could not broadcast input array from shape {arr2_shape} into shape {arr1_shape}")
else:
raise SyntaxError("operands could not be broadcast together with shapes {}, {}".format(
arr1_shape, arr2_shape))
def to_string(idx):
return ", ".join(reversed(idx))
out_shape = tuple(reversed([all_idx_dict[idx] for idx in all_idx]))
all_idx_tup = [(k, "0:" + str(all_idx_dict[k])) for k in reversed(all_idx)]
return out_shape, all_idx_tup, to_string(all_idx), to_string(a1_idx), to_string(a2_idx)
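# Illustrative sketch (comment only, values derived from the logic above): for
# shapes (3, 1) and (4,), _broadcast_together returns an output shape of (3, 4),
# map ranges [('__i0', '0:3'), ('__i1', '0:4')], and index strings
# '__i0, __i1' for the output, '__i0, 0' for the first operand and '__i1' for
# the second.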
def _binop(sdfg: SDFG, state: SDFGState, op1: str, op2: str, opcode: str, opname: str, restype: dace.typeclass):
""" Implements a general element-wise array binary operator. """
arr1 = sdfg.arrays[op1]
arr2 = sdfg.arrays[op2]
out_shape, all_idx_tup, all_idx, arr1_idx, arr2_idx = _broadcast_together(arr1.shape, arr2.shape)
name, _ = sdfg.add_temp_transient(out_shape, restype, arr1.storage)
state.add_mapped_tasklet("_%s_" % opname,
all_idx_tup, {
'__in1': Memlet.simple(op1, arr1_idx),
'__in2': Memlet.simple(op2, arr2_idx)
},
'__out = __in1 %s __in2' % opcode, {'__out': Memlet.simple(name, all_idx)},
external_edges=True)
return name
# Defined as a function in order to include the op and the opcode in the closure
def _makeunop(op, opcode):
@oprepo.replaces_operator('Array', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2=None):
return _unop(sdfg, state, op1, opcode, op)
@oprepo.replaces_operator('View', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2=None):
return _unop(sdfg, state, op1, opcode, op)
@oprepo.replaces_operator('Scalar', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2=None):
scalar1 = sdfg.arrays[op1]
restype, _ = _result_type([scalar1], op)
op2 = sdfg.temp_data_name()
_, scalar2 = sdfg.add_scalar(op2, restype, transient=True)
tasklet = state.add_tasklet("_%s_" % op, {'__in'}, {'__out'}, "__out = %s __in" % opcode)
node1 = state.add_read(op1)
node2 = state.add_write(op2)
state.add_edge(node1, None, tasklet, '__in', dace.Memlet.from_array(op1, scalar1))
state.add_edge(tasklet, '__out', node2, None, dace.Memlet.from_array(op2, scalar2))
return op2
@oprepo.replaces_operator('NumConstant', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: Number, op2=None):
expr = '{o}(op1)'.format(o=opcode)
vars = {'op1': op1}
return eval(expr, vars)
@oprepo.replaces_operator('BoolConstant', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: Number, op2=None):
expr = '{o}(op1)'.format(o=opcode)
vars = {'op1': op1}
return eval(expr, vars)
@oprepo.replaces_operator('symbol', op)
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: 'symbol', op2=None):
if opcode in _pyop2symtype.keys():
try:
return _pyop2symtype[opcode](op1)
except TypeError:
pass
expr = '{o}(op1)'.format(o=opcode)
vars = {'op1': op1}
return eval(expr, vars)
def _is_op_arithmetic(op: str):
if op in {'Add', 'Sub', 'Mult', 'Div', 'FloorDiv', 'Pow', 'Mod', 'FloatPow', 'Heaviside', 'Arctan2', 'Hypot'}:
return True
return False
def _is_op_bitwise(op: str):
if op in {'LShift', 'RShift', 'BitOr', 'BitXor', 'BitAnd', 'Invert'}:
return True
return False
def _is_op_boolean(op: str):
if op in {
'And', 'Or', 'Not', 'Eq', 'NotEq', 'Lt', 'LtE', 'Gt', 'GtE', 'Is', 'NotIs', 'Xor', 'FpBoolean', 'SignBit'
}:
return True
return False
def _representative_num(dtype: Union[dtypes.typeclass, Number]) -> Number:
if isinstance(dtype, dtypes.typeclass):
nptype = dtype.type
else:
nptype = dtype
if issubclass(nptype, bool):
return True
elif issubclass(nptype, np.bool_):
return np.bool_(True)
elif issubclass(nptype, Integral):
# NOTE: Returning the max representable integer seems a better choice
# than 1, however it was causing issues with some programs. This should
# be revisited in the future.
# return nptype(np.iinfo(nptype).max)
return nptype(1)
else:
return nptype(np.finfo(nptype).resolution)
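# Illustrative sketch (comment only): _representative_num(dace.int32) yields
# np.int32(1), while _representative_num(dace.float64) yields the dtype's
# resolution (np.finfo(np.float64).resolution, i.e. 1e-15); these stand-ins are
# only used to probe NumPy's type-promotion rules, never as actual data.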
def _np_result_type(nptypes):
# Fix for np.result_type returning platform-dependent types,
# e.g. np.longlong
restype = np.result_type(*nptypes)
if restype.type not in dtypes.DTYPE_TO_TYPECLASS.keys():
for k in dtypes.DTYPE_TO_TYPECLASS.keys():
if k == restype.type:
return dtypes.DTYPE_TO_TYPECLASS[k]
return dtypes.DTYPE_TO_TYPECLASS[restype.type]
def _sym_type(expr: Union[symbolic.symbol, sp.Basic]) -> dtypes.typeclass:
if isinstance(expr, symbolic.symbol):
return expr.dtype
representative_value = expr.subs([(s, _representative_num(s.dtype)) for s in expr.free_symbols])
pyval = eval(astutils.unparse(representative_value))
# Overflow check
if isinstance(pyval, int) and (pyval > np.iinfo(np.int64).max or pyval < np.iinfo(np.int64).min):
nptype = np.int64
else:
nptype = np.result_type(pyval)
return _np_result_type([nptype])
def _cast_str(dtype: dtypes.typeclass) -> str:
return dtypes.TYPECLASS_TO_STRING[dtype].replace('::', '.')
def _result_type(arguments: Sequence[Union[str, Number, symbolic.symbol, sp.Basic]],
operator: str = None) -> Tuple[Union[List[dtypes.typeclass], dtypes.typeclass, str], ...]:
datatypes = []
dtypes_for_result = []
for arg in arguments:
if isinstance(arg, (data.Array, data.Stream)):
datatypes.append(arg.dtype)
dtypes_for_result.append(arg.dtype.type)
elif isinstance(arg, data.Scalar):
datatypes.append(arg.dtype)
dtypes_for_result.append(_representative_num(arg.dtype))
elif isinstance(arg, (Number, np.bool_)):
datatypes.append(dtypes.DTYPE_TO_TYPECLASS[type(arg)])
dtypes_for_result.append(arg)
elif symbolic.issymbolic(arg):
datatypes.append(_sym_type(arg))
dtypes_for_result.append(_representative_num(_sym_type(arg)))
elif isinstance(arg, dtypes.typeclass):
datatypes.append(arg)
dtypes_for_result.append(_representative_num(arg))
else:
raise TypeError("Type {t} of argument {a} is not supported".format(t=type(arg), a=arg))
complex_types = {dace.complex64, dace.complex128, np.complex64, np.complex128}
float_types = {dace.float16, dace.float32, dace.float64, np.float16, np.float32, np.float64}
signed_types = {dace.int8, dace.int16, dace.int32, dace.int64, np.int8, np.int16, np.int32, np.int64}
# unsigned_types = {np.uint8, np.uint16, np.uint32, np.uint64}
coarse_types = []
for dtype in datatypes:
if dtype in complex_types:
coarse_types.append(3) # complex
elif dtype in float_types:
coarse_types.append(2) # float
elif dtype in signed_types:
coarse_types.append(1) # signed integer, bool
else:
coarse_types.append(0) # unsigned integer
casting = [None] * len(arguments)
if len(arguments) == 1: # Unary operators
if not operator:
result_type = datatypes[0]
elif operator == 'USub' and coarse_types[0] == 0:
result_type = eval('dace.int{}'.format(8 * datatypes[0].bytes))
elif operator == 'Abs' and coarse_types[0] == 3:
result_type = eval('dace.float{}'.format(4 * datatypes[0].bytes))
elif (operator in ('Fabs', 'Cbrt', 'Angles', 'SignBit', 'Spacing', 'Modf', 'Floor', 'Ceil', 'Trunc')
and coarse_types[0] == 3):
raise TypeError("ufunc '{}' not supported for complex input".format(operator))
elif (operator in ('Fabs', 'Rint', 'Exp', 'Log', 'Sqrt', 'Cbrt', 'Trigonometric', 'Angles', 'FpBoolean',
'Spacing', 'Modf', 'Floor', 'Ceil', 'Trunc') and coarse_types[0] < 2):
result_type = dace.float64
casting[0] = _cast_str(result_type)
        elif operator == 'Frexp':
if coarse_types[0] == 3:
raise TypeError("ufunc '{}' not supported for complex "
"input".format(operator))
result_type = [None, dace.int32]
if coarse_types[0] < 2:
result_type[0] = dace.float64
casting[0] = _cast_str(result_type[0])
else:
result_type[0] = datatypes[0]
elif _is_op_bitwise(operator) and coarse_types[0] > 1:
raise TypeError("unsupported operand type for {}: '{}'".format(operator, datatypes[0]))
elif _is_op_boolean(operator):
result_type = dace.bool_
if operator == 'SignBit' and coarse_types[0] < 2:
casting[0] = _cast_str(dace.float64)
else:
result_type = datatypes[0]
elif len(arguments) == 2: # Binary operators
type1 = coarse_types[0]
type2 = coarse_types[1]
dtype1 = datatypes[0]
dtype2 = datatypes[1]
max_bytes = max(dtype1.bytes, dtype2.bytes)
left_cast = None
right_cast = None
if _is_op_arithmetic(operator):
# Float/True division between integers
if operator == 'Div' and max(type1, type2) < 2:
# NOTE: Leaving this here in case we implement a C/C++ flag
# if type1 == type2 and type1 == 0: # Unsigned integers
# result_type = eval('dace.uint{}'.format(8 * max_bytes))
# else:
# result_type = eval('dace.int{}'.format(8 * max_bytes))
result_type = dace.float64
# Floor division with at least one complex argument
# NOTE: NumPy allows this operation
# elif operator == 'FloorDiv' and max(type1, type2) == 3:
# raise TypeError("can't take floor of complex number")
# Floor division with at least one float argument
elif operator == 'FloorDiv' and max(type1, type2) == 2:
if type1 == type2:
result_type = eval('dace.float{}'.format(8 * max_bytes))
else:
result_type = dace.float64
# Floor division between integers
elif operator == 'FloorDiv' and max(type1, type2) < 2:
if type1 == type2 and type1 == 0: # Unsigned integers
result_type = eval('dace.uint{}'.format(8 * max_bytes))
else:
result_type = eval('dace.int{}'.format(8 * max_bytes))
# Power with base integer and exponent signed integer
elif (operator == 'Pow' and max(type1, type2) < 2 and dtype2 in signed_types):
result_type = dace.float64
elif operator == 'FloatPow':
# Float power with integers or floats
if max(type1, type2) < 3:
result_type = dace.float64
# Float power with complex numbers
else:
result_type = dace.complex128
elif (operator in ('Heaviside', 'Arctan2', 'Hypot') and max(type1, type2) == 3):
raise TypeError("ufunc '{}' not supported for complex input".format(operator))
elif (operator in ('Heaviside', 'Arctan2', 'Hypot') and max(type1, type2) < 2):
result_type = dace.float64
# All other arithmetic operators and cases of the above operators
else:
result_type = _np_result_type(dtypes_for_result)
if dtype1 != result_type:
left_cast = _cast_str(result_type)
if dtype2 != result_type:
right_cast = _cast_str(result_type)
elif _is_op_bitwise(operator):
type1 = coarse_types[0]
type2 = coarse_types[1]
dtype1 = datatypes[0]
dtype2 = datatypes[1]
# Only integers may be arguments of bitwise and shifting operations
if max(type1, type2) > 1:
raise TypeError("unsupported operand type(s) for {}: "
"'{}' and '{}'".format(operator, dtype1, dtype2))
result_type = _np_result_type(dtypes_for_result)
if dtype1 != result_type:
left_cast = _cast_str(result_type)
if dtype2 != result_type:
right_cast = _cast_str(result_type)
elif _is_op_boolean(operator):
result_type = dace.bool_
elif operator in ('Gcd', 'Lcm'):
if max(type1, type2) > 1:
raise TypeError("unsupported operand type(s) for {}: "
"'{}' and '{}'".format(operator, dtype1, dtype2))
result_type = _np_result_type(dtypes_for_result)
if dtype1 != result_type:
left_cast = _cast_str(result_type)
if dtype2 != result_type:
right_cast = _cast_str(result_type)
elif operator and operator in ('CopySign', 'NextAfter'):
if max(type1, type2) > 2:
raise TypeError("unsupported operand type(s) for {}: "
"'{}' and '{}'".format(operator, dtype1, dtype2))
if max(type1, type2) < 2:
result_type = dace.float64
else:
result_type = _np_result_type(dtypes_for_result)
if dtype1 != result_type:
left_cast = _cast_str(result_type)
if dtype2 != result_type:
right_cast = _cast_str(result_type)
        elif operator == 'Ldexp':
if max(type1, type2) > 2 or type2 > 1:
raise TypeError("unsupported operand type(s) for {}: "
"'{}' and '{}'".format(operator, dtype1, dtype2))
if type1 < 2:
result_type = dace.float64
left_cast = _cast_str(result_type)
else:
result_type = dtype1
if dtype2 != dace.int32:
right_cast = _cast_str(dace.int32)
if not np.can_cast(dtype2.type, np.int32):
warnings.warn("Second input to {} is of type {}, which "
"cannot be safely cast to {}".format(operator, dtype2, dace.int32))
else: # Other binary operators
result_type = _np_result_type(dtypes_for_result)
if dtype1 != result_type:
left_cast = _cast_str(result_type)
if dtype2 != result_type:
right_cast = _cast_str(result_type)
casting = [left_cast, right_cast]
else: # Operators with 3 or more arguments
result_type = _np_result_type(dtypes_for_result)
        for i, dtype in enumerate(datatypes):
            if dtype != result_type:
                casting[i] = _cast_str(result_type)
return result_type, casting
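# Illustrative sketch (comment only, based on the rules above): dividing two
# int32 arrays with the 'Div' operator promotes the result to dace.float64 and
# marks both operands for casting, while 'FloorDiv' on the same inputs keeps an
# integer result (dace.int32 here) with no casts.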
def _array_array_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Both operands are Arrays (or Data in general)'''
left_arr = sdfg.arrays[left_operand]
right_arr = sdfg.arrays[right_operand]
left_type = left_arr.dtype
right_type = right_arr.dtype
    # Implicit Python conversion implemented as casting
arguments = [left_arr, right_arr]
tasklet_args = ['__in1', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{}(__in1)".format(str(left_cast).replace('::', '.'))
if right_cast is not None:
tasklet_args[1] = "{}(__in2)".format(str(right_cast).replace('::', '.'))
left_shape = left_arr.shape
right_shape = right_arr.shape
(out_shape, all_idx_dict, out_idx, left_idx, right_idx) = _broadcast_together(left_shape, right_shape)
# Fix for Scalars
if isinstance(left_arr, data.Scalar):
left_idx = subsets.Range([(0, 0, 1)])
if isinstance(right_arr, data.Scalar):
right_idx = subsets.Range([(0, 0, 1)])
out_operand, out_arr = sdfg.add_temp_transient(out_shape, result_type, left_arr.storage)
if list(out_shape) == [1]:
tasklet = state.add_tasklet('_%s_' % operator, {'__in1', '__in2'}, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n1 = state.add_read(left_operand)
n2 = state.add_read(right_operand)
n3 = state.add_write(out_operand)
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_arr))
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_arr))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_arr))
else:
state.add_mapped_tasklet("_%s_" % operator,
all_idx_dict, {
'__in1': Memlet.simple(left_operand, left_idx),
'__in2': Memlet.simple(right_operand, right_idx)
},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]),
{'__out': Memlet.simple(out_operand, out_idx)},
external_edges=True)
return out_operand
def _array_const_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Operands are an Array and a Constant'''
if left_operand in sdfg.arrays:
left_arr = sdfg.arrays[left_operand]
left_type = left_arr.dtype
left_shape = left_arr.shape
storage = left_arr.storage
right_arr = None
right_type = dtypes.DTYPE_TO_TYPECLASS[type(right_operand)]
right_shape = [1]
arguments = [left_arr, right_operand]
tasklet_args = ['__in1', f'({str(right_operand)})']
else:
left_arr = None
left_type = dtypes.DTYPE_TO_TYPECLASS[type(left_operand)]
left_shape = [1]
right_arr = sdfg.arrays[right_operand]
right_type = right_arr.dtype
right_shape = right_arr.shape
storage = right_arr.storage
arguments = [left_operand, right_arr]
tasklet_args = [f'({str(left_operand)})', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{c}({o})".format(c=str(left_cast).replace('::', '.'), o=tasklet_args[0])
if right_cast is not None:
tasklet_args[1] = "{c}({o})".format(c=str(right_cast).replace('::', '.'), o=tasklet_args[1])
(out_shape, all_idx_dict, out_idx, left_idx, right_idx) = _broadcast_together(left_shape, right_shape)
out_operand, out_arr = sdfg.add_temp_transient(out_shape, result_type, storage)
if list(out_shape) == [1]:
if left_arr:
inp_conn = {'__in1'}
n1 = state.add_read(left_operand)
else:
inp_conn = {'__in2'}
n2 = state.add_read(right_operand)
tasklet = state.add_tasklet('_%s_' % operator, inp_conn, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n3 = state.add_write(out_operand)
if left_arr:
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_arr))
else:
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_arr))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_arr))
else:
if left_arr:
inp_memlets = {'__in1': Memlet.simple(left_operand, left_idx)}
else:
inp_memlets = {'__in2': Memlet.simple(right_operand, right_idx)}
state.add_mapped_tasklet("_%s_" % operator,
all_idx_dict,
inp_memlets,
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]),
{'__out': Memlet.simple(out_operand, out_idx)},
external_edges=True)
return out_operand
def _array_sym_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Operands are an Array and a Symbol'''
if left_operand in sdfg.arrays:
left_arr = sdfg.arrays[left_operand]
left_type = left_arr.dtype
left_shape = left_arr.shape
storage = left_arr.storage
right_arr = None
right_type = _sym_type(right_operand)
right_shape = [1]
arguments = [left_arr, right_operand]
tasklet_args = ['__in1', f'({astutils.unparse(right_operand)})']
else:
left_arr = None
left_type = _sym_type(left_operand)
left_shape = [1]
right_arr = sdfg.arrays[right_operand]
right_type = right_arr.dtype
right_shape = right_arr.shape
storage = right_arr.storage
arguments = [left_operand, right_arr]
tasklet_args = [f'({astutils.unparse(left_operand)})', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{c}({o})".format(c=str(left_cast).replace('::', '.'), o=tasklet_args[0])
if right_cast is not None:
tasklet_args[1] = "{c}({o})".format(c=str(right_cast).replace('::', '.'), o=tasklet_args[1])
(out_shape, all_idx_dict, out_idx, left_idx, right_idx) = _broadcast_together(left_shape, right_shape)
out_operand, out_arr = sdfg.add_temp_transient(out_shape, result_type, storage)
if list(out_shape) == [1]:
if left_arr:
inp_conn = {'__in1'}
n1 = state.add_read(left_operand)
else:
inp_conn = {'__in2'}
n2 = state.add_read(right_operand)
tasklet = state.add_tasklet('_%s_' % operator, inp_conn, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n3 = state.add_write(out_operand)
if left_arr:
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_arr))
else:
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_arr))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_arr))
else:
if left_arr:
inp_memlets = {'__in1': Memlet.simple(left_operand, left_idx)}
else:
inp_memlets = {'__in2': Memlet.simple(right_operand, right_idx)}
state.add_mapped_tasklet("_%s_" % operator,
all_idx_dict,
inp_memlets,
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]),
{'__out': Memlet.simple(out_operand, out_idx)},
external_edges=True)
return out_operand
def _scalar_scalar_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Both operands are Scalars'''
left_scal = sdfg.arrays[left_operand]
right_scal = sdfg.arrays[right_operand]
left_type = left_scal.dtype
right_type = right_scal.dtype
    # Implicit Python conversion implemented as casting
arguments = [left_scal, right_scal]
tasklet_args = ['__in1', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{}(__in1)".format(str(left_cast).replace('::', '.'))
if right_cast is not None:
tasklet_args[1] = "{}(__in2)".format(str(right_cast).replace('::', '.'))
out_operand = sdfg.temp_data_name()
_, out_scal = sdfg.add_scalar(out_operand, result_type, transient=True, storage=left_scal.storage)
tasklet = state.add_tasklet('_%s_' % operator, {'__in1', '__in2'}, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n1 = state.add_read(left_operand)
n2 = state.add_read(right_operand)
n3 = state.add_write(out_operand)
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_scal))
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_scal))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_scal))
return out_operand
def _scalar_const_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Operands are a Scalar and a Constant'''
if left_operand in sdfg.arrays:
left_scal = sdfg.arrays[left_operand]
storage = left_scal.storage
right_scal = None
arguments = [left_scal, right_operand]
tasklet_args = ['__in1', f'({str(right_operand)})']
else:
left_scal = None
right_scal = sdfg.arrays[right_operand]
storage = right_scal.storage
arguments = [left_operand, right_scal]
tasklet_args = [f'({str(left_operand)})', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{c}({o})".format(c=str(left_cast).replace('::', '.'), o=tasklet_args[0])
if right_cast is not None:
tasklet_args[1] = "{c}({o})".format(c=str(right_cast).replace('::', '.'), o=tasklet_args[1])
out_operand = sdfg.temp_data_name()
_, out_scal = sdfg.add_scalar(out_operand, result_type, transient=True, storage=storage)
if left_scal:
inp_conn = {'__in1'}
n1 = state.add_read(left_operand)
else:
inp_conn = {'__in2'}
n2 = state.add_read(right_operand)
tasklet = state.add_tasklet('_%s_' % operator, inp_conn, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n3 = state.add_write(out_operand)
if left_scal:
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_scal))
else:
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_scal))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_scal))
return out_operand
def _scalar_sym_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Operands are a Scalar and a Symbol'''
if left_operand in sdfg.arrays:
left_scal = sdfg.arrays[left_operand]
left_type = left_scal.dtype
storage = left_scal.storage
right_scal = None
right_type = _sym_type(right_operand)
arguments = [left_scal, right_operand]
tasklet_args = ['__in1', f'({astutils.unparse(right_operand)})']
else:
left_scal = None
left_type = _sym_type(left_operand)
right_scal = sdfg.arrays[right_operand]
right_type = right_scal.dtype
storage = right_scal.storage
arguments = [left_operand, right_scal]
tasklet_args = [f'({astutils.unparse(left_operand)})', '__in2']
result_type, casting = _result_type(arguments, operator)
left_cast = casting[0]
right_cast = casting[1]
if left_cast is not None:
tasklet_args[0] = "{c}({o})".format(c=str(left_cast).replace('::', '.'), o=tasklet_args[0])
if right_cast is not None:
tasklet_args[1] = "{c}({o})".format(c=str(right_cast).replace('::', '.'), o=tasklet_args[1])
out_operand = sdfg.temp_data_name()
_, out_scal = sdfg.add_scalar(out_operand, result_type, transient=True, storage=storage)
if left_scal:
inp_conn = {'__in1'}
n1 = state.add_read(left_operand)
else:
inp_conn = {'__in2'}
n2 = state.add_read(right_operand)
tasklet = state.add_tasklet('_%s_' % operator, inp_conn, {'__out'},
'__out = {i1} {op} {i2}'.format(i1=tasklet_args[0], op=opcode, i2=tasklet_args[1]))
n3 = state.add_write(out_operand)
if left_scal:
state.add_edge(n1, None, tasklet, '__in1', dace.Memlet.from_array(left_operand, left_scal))
else:
state.add_edge(n2, None, tasklet, '__in2', dace.Memlet.from_array(right_operand, right_scal))
state.add_edge(tasklet, '__out', n3, None, dace.Memlet.from_array(out_operand, out_scal))
return out_operand
_pyop2symtype = {
# Boolean ops
"and": sp.And,
"or": sp.Or,
"not": sp.Not,
    # Comparison ops
"==": sp.Equality,
"!=": sp.Unequality,
">=": sp.GreaterThan,
"<=": sp.LessThan,
">": sp.StrictGreaterThan,
"<": sp.StrictLessThan
}
def _const_const_binop(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, left_operand: str, right_operand: str,
operator: str, opcode: str):
'''Both operands are Constants or Symbols'''
_, casting = _result_type([left_operand, right_operand], operator)
left_cast = casting[0]
right_cast = casting[1]
if isinstance(left_operand, (Number, np.bool_)) and left_cast is not None:
left = eval(left_cast)(left_operand)
else:
left = left_operand
if isinstance(right_operand, (Number, np.bool_)) and right_cast is not None:
right = eval(right_cast)(right_operand)
else:
right = right_operand
# Support for SymPy expressions
if isinstance(left, sp.Basic) or isinstance(right, sp.Basic):
if opcode in _pyop2symtype.keys():
try:
return _pyop2symtype[opcode](left, right)
except TypeError:
# This may happen in cases such as `False or (N + 1)`.
# (N + 1) is a symbolic expressions, but because it is not
# boolean, SymPy returns TypeError when trying to create
# `sympy.Or(False, N + 1)`. In such a case, we try again with
# the normal Python operator.
pass
expr = 'l {o} r'.format(o=opcode)
vars = {'l': left, 'r': right}
return eval(expr, vars)
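# Illustrative sketch (comment only): with two plain Python numbers, e.g.
# left=2, right=3 and opcode '+', the function simply evaluates to 5; if either
# side is a symbolic expression and the opcode is a comparison such as '<', the
# corresponding SymPy relational (e.g. sp.StrictLessThan) is constructed instead.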
def _makebinop(op, opcode):
@oprepo.replaces_operator('Array', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Array', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Array', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Array', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Array', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Array', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('View', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_array_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_scalar_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('Scalar', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('NumConstant', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('BoolConstant', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='Array')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='View')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _array_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='Scalar')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _scalar_sym_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='NumConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='BoolConstant')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
@oprepo.replaces_operator('symbol', op, otherclass='symbol')
def _op(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
return _const_const_binop(visitor, sdfg, state, op1, op2, op, opcode)
# Define all standard Python unary operators
for op, opcode in [('UAdd', '+'), ('USub', '-'), ('Not', 'not'), ('Invert', '~')]:
_makeunop(op, opcode)
# Define all standard Python binary operators
# NOTE: ('MatMult', '@') is defined separately
for op, opcode in [('Add', '+'), ('Sub', '-'), ('Mult', '*'), ('Div', '/'), ('FloorDiv', '//'), ('Mod', '%'),
('Pow', '**'), ('LShift', '<<'), ('RShift', '>>'), ('BitOr', '|'), ('BitXor', '^'), ('BitAnd', '&'),
('And', 'and'), ('Or', 'or'), ('Eq', '=='), ('NotEq', '!='), ('Lt', '<'), ('LtE', '<='), ('Gt', '>'),
('GtE', '>='), ('Is', 'is'), ('IsNot', 'is not')]:
_makebinop(op, opcode)
@oprepo.replaces_operator('Array', 'MatMult')
@oprepo.replaces_operator('View', 'MatMult')
@oprepo.replaces_operator('Array', 'MatMult', 'View')
@oprepo.replaces_operator('View', 'MatMult', 'Array')
def _matmult(visitor: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op1: str, op2: str):
from dace.libraries.blas.nodes.matmul import MatMul # Avoid import loop
arr1 = sdfg.arrays[op1]
arr2 = sdfg.arrays[op2]
if len(arr1.shape) > 1 and len(arr2.shape) > 1: # matrix * matrix
if len(arr1.shape) > 3 or len(arr2.shape) > 3:
raise SyntaxError('Matrix multiplication of tensors of dimensions > 3 '
'not supported')
if arr1.shape[-1] != arr2.shape[-2]:
raise SyntaxError('Matrix dimension mismatch %s != %s' % (arr1.shape[-1], arr2.shape[-2]))
from dace.libraries.blas.nodes.matmul import _get_batchmm_opts
# Determine batched multiplication
bopt = _get_batchmm_opts(arr1.shape, arr1.strides, arr2.shape, arr2.strides, None, None)
if bopt:
output_shape = (bopt['b'], arr1.shape[-2], arr2.shape[-1])
else:
output_shape = (arr1.shape[-2], arr2.shape[-1])
elif len(arr1.shape) == 2 and len(arr2.shape) == 1: # matrix * vector
if arr1.shape[1] != arr2.shape[0]:
raise SyntaxError("Number of matrix columns {} must match"
"size of vector {}.".format(arr1.shape[1], arr2.shape[0]))
output_shape = (arr1.shape[0], )
elif len(arr1.shape) == 1 and len(arr2.shape) == 2: # vector * matrix
if arr1.shape[0] != arr2.shape[0]:
raise SyntaxError("Size of vector {} must match number of matrix "
"rows {} must match".format(arr1.shape[0], arr2.shape[0]))
output_shape = (arr2.shape[1], )
elif len(arr1.shape) == 1 and len(arr2.shape) == 1: # vector * vector
if arr1.shape[0] != arr2.shape[0]:
raise SyntaxError("Vectors in vector product must have same size: "
"{} vs. {}".format(arr1.shape[0], arr2.shape[0]))
output_shape = (1, )
    else:  # Unsupported combination of shapes, bail out
raise SyntaxError("Cannot multiply arrays with shapes: {} and {}".format(arr1.shape, arr2.shape))
type1 = arr1.dtype.type
type2 = arr2.dtype.type
restype = dace.DTYPE_TO_TYPECLASS[np.result_type(type1, type2).type]
op3, arr3 = sdfg.add_temp_transient(output_shape, restype, arr1.storage)
acc1 = state.add_read(op1)
acc2 = state.add_read(op2)
acc3 = state.add_write(op3)
tasklet = MatMul('_MatMult_')
state.add_node(tasklet)
state.add_edge(acc1, None, tasklet, '_a', dace.Memlet.from_array(op1, arr1))
state.add_edge(acc2, None, tasklet, '_b', dace.Memlet.from_array(op2, arr2))
state.add_edge(tasklet, '_c', acc3, None, dace.Memlet.from_array(op3, arr3))
return op3
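# Illustrative sketch (comment only): the '@' replacement above supports
# matrix @ matrix ((M, K) @ (K, N) -> (M, N), optionally batched up to 3D),
# matrix @ vector ((M, K) @ (K,) -> (M,)), vector @ matrix ((K,) @ (K, N) -> (N,))
# and vector @ vector ((K,) @ (K,) -> (1,)), delegating the computation to the
# MatMul library node.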
# NumPy ufunc support #########################################################
UfuncInput = Union[str, Number, sp.Basic]
UfuncOutput = Union[str, None]
# TODO: Add all ufuncs in subsequent PR's.
ufuncs = dict(
add=dict(name="_numpy_add_",
operator="Add",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 + __in2",
reduce="lambda a, b: a + b",
initial=np.add.identity),
subtract=dict(name="_numpy_subtract_",
operator="Sub",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 - __in2",
reduce="lambda a, b: a - b",
initial=np.subtract.identity),
multiply=dict(name="_numpy_multiply_",
operator="Mul",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 * __in2",
reduce="lambda a, b: a * b",
initial=np.multiply.identity),
divide=dict(name="_numpy_divide_",
operator="Div",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 / __in2",
reduce="lambda a, b: a / b",
initial=np.divide.identity),
logaddexp=dict(name="_numpy_logaddexp_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = log( exp(__in1) + exp(__in2) )",
reduce="lambda a, b: log( exp(a) + exp(b) )",
initial=np.logaddexp.identity),
logaddexp2=dict(name="_numpy_logaddexp2_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = log2( exp2(__in1) + exp2(__in2) )",
reduce="lambda a, b: log( exp2(a) + exp2(b) )",
initial=np.logaddexp2.identity),
true_divide=dict(name="_numpy_true_divide_",
operator="Div",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 / __in2",
reduce="lambda a, b: a / b",
initial=np.true_divide.identity),
floor_divide=dict(name="_numpy_floor_divide_",
operator="FloorDiv",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = py_floor(__in1, __in2)",
reduce="lambda a, b: py_floor(a, b)",
initial=np.floor_divide.identity),
negative=dict(name="_numpy_negative_",
operator="USub",
inputs=["__in1"],
outputs=["__out"],
code="__out = - __in1",
reduce=None,
initial=np.negative.identity),
positive=dict(name="_numpy_positive_",
operator="UAdd",
inputs=["__in1"],
outputs=["__out"],
code="__out = + __in1",
reduce=None,
initial=np.positive.identity),
power=dict(name="_numpy_power_",
operator="Pow",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 ** __in2",
reduce="lambda a, b: a ** b",
initial=np.power.identity),
float_power=dict(name="_numpy_float_power_",
operator="FloatPow",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = np_float_pow(__in1, __in2)",
reduce="lambda a, b: np_float_pow(a, b)",
initial=np.float_power.identity),
remainder=dict(name="_numpy_remainder_",
operator="Mod",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = py_mod(__in1, __in2)",
reduce="lambda a, b: py_mod(a, b)",
initial=np.remainder.identity),
mod=dict(name="_numpy_mod_",
operator="Mod",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = py_mod(__in1, __in2)",
reduce="lambda a, b: py_mod(a, b)",
initial=np.mod.identity),
fmod=dict(name="_numpy_fmod_",
operator="Mod",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = cpp_mod(__in1, __in2)",
reduce="lambda a, b: cpp_mod(a, b)",
initial=np.fmod.identity),
divmod=dict(name="_numpy_divmod_",
operator="Div",
inputs=["__in1", "__in2"],
outputs=["__out1", "__out2"],
code="py_divmod(__in1, __in2, __out1, __out2)",
reduce=None,
initial=np.divmod.identity),
absolute=dict(name="_numpy_absolute_",
operator="Abs",
inputs=["__in1"],
outputs=["__out"],
code="__out = abs(__in1)",
reduce=None,
initial=np.absolute.identity),
abs=dict(name="_numpy_abs_",
operator="Abs",
inputs=["__in1"],
outputs=["__out"],
code="__out = abs(__in1)",
reduce=None,
initial=np.abs.identity),
fabs=dict(name="_numpy_fabs_",
operator="Fabs",
inputs=["__in1"],
outputs=["__out"],
code="__out = fabs(__in1)",
reduce=None,
initial=np.fabs.identity),
rint=dict(name="_numpy_rint_",
operator="Rint",
inputs=["__in1"],
outputs=["__out"],
code="__out = round(__in1)",
reduce=None,
initial=np.rint.identity),
sign=dict(name="_numpy_sign_",
operator=None,
inputs=["__in1"],
outputs=["__out"],
code="__out = sign(__in1)",
reduce=None,
initial=np.sign.identity),
heaviside=dict(name="_numpy_heaviside_",
operator="Heaviside",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = heaviside(__in1, __in2)",
reduce="lambda a, b: heaviside(a, b)",
initial=np.heaviside.identity),
conj=dict(name="_numpy_conj_",
operator=None,
inputs=["__in1"],
outputs=["__out"],
code="__out = conj(__in1)",
reduce=None,
initial=np.conj.identity),
conjugate=dict(name="_numpy_conjugate_",
operator=None,
inputs=["__in1"],
outputs=["__out"],
code="__out = conj(__in1)",
reduce=None,
initial=np.conjugate.identity),
exp=dict(name="_numpy_exp_",
operator="Exp",
inputs=["__in1"],
outputs=["__out"],
code="__out = exp(__in1)",
reduce=None,
initial=np.exp.identity),
exp2=dict(name="_numpy_exp2_",
operator="Exp",
inputs=["__in1"],
outputs=["__out"],
code="__out = exp2(__in1)",
reduce=None,
initial=np.exp2.identity),
log=dict(name="_numpy_log_",
operator="Log",
inputs=["__in1"],
outputs=["__out"],
code="__out = log(__in1)",
reduce=None,
initial=np.log.identity),
log2=dict(name="_numpy_log2_",
operator="Log",
inputs=["__in1"],
outputs=["__out"],
code="__out = log2(__in1)",
reduce=None,
initial=np.log2.identity),
log10=dict(name="_numpy_log10_",
operator="Log",
inputs=["__in1"],
outputs=["__out"],
code="__out = log10(__in1)",
reduce=None,
initial=np.log10.identity),
expm1=dict(name="_numpy_expm1_",
operator="Exp",
inputs=["__in1"],
outputs=["__out"],
code="__out = expm1(__in1)",
reduce=None,
initial=np.expm1.identity),
log1p=dict(name="_numpy_log1p_",
operator="Log",
inputs=["__in1"],
outputs=["__out"],
code="__out = log1p(__in1)",
reduce=None,
initial=np.log1p.identity),
sqrt=dict(name="_numpy_sqrt_",
operator="Sqrt",
inputs=["__in1"],
outputs=["__out"],
code="__out = sqrt(__in1)",
reduce=None,
initial=np.sqrt.identity),
square=dict(name="_numpy_square_",
operator=None,
inputs=["__in1"],
outputs=["__out"],
code="__out = __in1 * __in1",
reduce=None,
initial=np.square.identity),
cbrt=dict(name="_numpy_cbrt_",
operator="Cbrt",
inputs=["__in1"],
outputs=["__out"],
code="__out = cbrt(__in1)",
reduce=None,
initial=np.cbrt.identity),
reciprocal=dict(name="_numpy_reciprocal_",
operator="Div",
inputs=["__in1"],
outputs=["__out"],
code="__out = reciprocal(__in1)",
reduce=None,
initial=np.reciprocal.identity),
gcd=dict(name="_numpy_gcd_",
operator="Gcd",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = gcd(__in1, __in2)",
reduce="lambda a, b: gcd(a, b)",
initial=np.gcd.identity),
lcm=dict(name="_numpy_lcm_",
operator="Lcm",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = lcm(__in1, __in2)",
reduce="lambda a, b: lcm(a, b)",
initial=np.lcm.identity),
sin=dict(name="_numpy_sin_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = sin(__in1)",
reduce=None,
initial=np.sin.identity),
cos=dict(name="_numpy_cos_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = cos(__in1)",
reduce=None,
initial=np.cos.identity),
tan=dict(name="_numpy_tan_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = tan(__in1)",
reduce=None,
initial=np.tan.identity),
arcsin=dict(name="_numpy_arcsin_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = asin(__in1)",
reduce=None,
initial=np.arcsin.identity),
arccos=dict(name="_numpy_arccos_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = acos(__in1)",
reduce=None,
initial=np.arccos.identity),
arctan=dict(name="_numpy_arctan_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = atan(__in1)",
reduce=None,
initial=np.arctan.identity),
sinh=dict(name="_numpy_sinh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = sinh(__in1)",
reduce=None,
initial=np.sinh.identity),
cosh=dict(name="_numpy_cosh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = cosh(__in1)",
reduce=None,
initial=np.cosh.identity),
tanh=dict(name="_numpy_tanh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = tanh(__in1)",
reduce=None,
initial=np.tanh.identity),
arcsinh=dict(name="_numpy_arcsinh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = asinh(__in1)",
reduce=None,
initial=np.arcsinh.identity),
arccosh=dict(name="_numpy_arccosh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = acosh(__in1)",
reduce=None,
                 initial=np.arccosh.identity),
arctanh=dict(name="_numpy_arctanh_",
operator="Trigonometric",
inputs=["__in1"],
outputs=["__out"],
code="__out = atanh(__in1)",
reduce=None,
initial=np.arctanh.identity),
arctan2=dict(name="_numpy_arctan2_",
operator="Arctan2",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = atan2(__in1, __in2)",
reduce="lambda a, b: atan2(a, b)",
initial=np.arctan2.identity),
hypot=dict(name="_numpy_hypot_",
operator="Hypot",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = hypot(__in1, __in2)",
reduce="lambda a, b: hypot(a, b)",
               initial=np.hypot.identity),
degrees=dict(name="_numpy_degrees_",
operator="Angles",
inputs=["__in1"],
outputs=["__out"],
code="__out = rad2deg(__in1)",
reduce=None,
initial=np.degrees.identity),
rad2deg=dict(name="_numpy_rad2deg_",
operator="Angles",
inputs=["__in1"],
outputs=["__out"],
code="__out = rad2deg(__in1)",
reduce=None,
initial=np.rad2deg.identity),
radians=dict(name="_numpy_radians_",
operator="Angles",
inputs=["__in1"],
outputs=["__out"],
code="__out = deg2rad(__in1)",
reduce=None,
initial=np.radians.identity),
deg2rad=dict(name="_numpy_deg2rad_",
operator="Angles",
inputs=["__in1"],
outputs=["__out"],
code="__out = deg2rad(__in1)",
reduce=None,
initial=np.deg2rad.identity),
bitwise_and=dict(name="_numpy_bitwise_and_",
operator="BitAnd",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 & __in2",
reduce="lambda a, b: a & b",
initial=np.bitwise_and.identity),
bitwise_or=dict(name="_numpy_bitwise_or_",
operator="BitOr",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 | __in2",
reduce="lambda a, b: a | b",
initial=np.bitwise_or.identity),
bitwise_xor=dict(name="_numpy_bitwise_xor_",
operator="BitXor",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 ^ __in2",
reduce="lambda a, b: a ^ b",
initial=np.bitwise_xor.identity),
invert=dict(name="_numpy_invert_",
operator="Invert",
inputs=["__in1"],
outputs=["__out"],
code="__out = ~ __in1",
reduce=None,
initial=np.invert.identity),
left_shift=dict(name="_numpy_left_shift_",
operator="LShift",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 << __in2",
reduce="lambda a, b: a << b",
initial=np.left_shift.identity),
right_shift=dict(name="_numpy_right_shift_",
operator="RShift",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 >> __in2",
reduce="lambda a, b: a >> b",
initial=np.right_shift.identity),
greater=dict(name="_numpy_greater_",
operator="Gt",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 > __in2",
reduce="lambda a, b: a > b",
initial=np.greater.identity),
greater_equal=dict(name="_numpy_greater_equal_",
operator="GtE",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 >= __in2",
reduce="lambda a, b: a >= b",
initial=np.greater_equal.identity),
less=dict(name="_numpy_less_",
operator="Lt",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 < __in2",
reduce="lambda a, b: a < b",
initial=np.less.identity),
less_equal=dict(name="_numpy_less_equal_",
operator="LtE",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 <= __in2",
reduce="lambda a, b: a <= b",
initial=np.less_equal.identity),
equal=dict(name="_numpy_equal_",
operator="Eq",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 == __in2",
reduce="lambda a, b: a == b",
initial=np.equal.identity),
not_equal=dict(name="_numpy_not_equal_",
operator="NotEq",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 != __in2",
reduce="lambda a, b: a != b",
initial=np.not_equal.identity),
logical_and=dict(name="_numpy_logical_and_",
operator="And",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 and __in2",
reduce="lambda a, b: a and b",
initial=np.logical_and.identity),
logical_or=dict(name="_numpy_logical_or_",
operator="Or",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = __in1 or __in2",
reduce="lambda a, b: a or b",
initial=np.logical_or.identity),
logical_xor=dict(name="_numpy_logical_xor_",
operator="Xor",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = (not __in1) != (not __in2)",
reduce="lambda a, b: (not a) != (not b)",
initial=np.logical_xor.identity),
logical_not=dict(name="_numpy_logical_not_",
operator="Not",
inputs=["__in1"],
outputs=["__out"],
code="__out = not __in1",
reduce=None,
initial=np.logical_not.identity),
maximum=dict(name="_numpy_maximum_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = max(__in1, __in2)",
reduce="lambda a, b: max(a, b)",
initial=-np.inf), # np.maximum.identity is None
fmax=dict(name="_numpy_fmax_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = fmax(__in1, __in2)",
reduce="lambda a, b: fmax(a, b)",
initial=-np.inf), # np.fmax.identity is None
minimum=dict(name="_numpy_minimum_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = min(__in1, __in2)",
reduce="lambda a, b: min(a, b)",
initial=np.inf), # np.minimum.identity is None
fmin=dict(name="_numpy_fmin_",
operator=None,
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = fmin(__in1, __in2)",
reduce="lambda a, b: fmin(a, b)",
initial=np.inf), # np.fmin.identity is None
isfinite=dict(name="_numpy_isfinite_",
operator="FpBoolean",
inputs=["__in1"],
outputs=["__out"],
code="__out = isfinite(__in1)",
reduce=None,
initial=np.isfinite.identity),
isinf=dict(name="_numpy_isinf_",
operator="FpBoolean",
inputs=["__in1"],
outputs=["__out"],
code="__out = isinf(__in1)",
reduce=None,
initial=np.isinf.identity),
isnan=dict(name="_numpy_isnan_",
operator="FpBoolean",
inputs=["__in1"],
outputs=["__out"],
code="__out = isnan(__in1)",
reduce=None,
initial=np.isnan.identity),
signbit=dict(name="_numpy_signbit_",
operator="SignBit",
inputs=["__in1"],
outputs=["__out"],
code="__out = signbit(__in1)",
reduce=None,
initial=np.signbit.identity),
copysign=dict(name="_numpy_copysign_",
operator="CopySign",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = copysign(__in1, __in2)",
reduce="lambda a, b: copysign(a, b)",
initial=np.copysign.identity),
nextafter=dict(name="_numpy_nextafter_",
operator="NextAfter",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = nextafter(__in1, __in2)",
reduce="lambda a, b: nextafter(a, b)",
initial=np.nextafter.identity),
spacing=dict(name="_numpy_spacing_",
operator="Spacing",
inputs=["__in1"],
outputs=["__out"],
code="__out = nextafter(__in1, inf) - __in1",
reduce=None,
initial=np.spacing.identity),
modf=dict(name="_numpy_modf_",
operator="Modf",
inputs=["__in1"],
outputs=["__out1", "__out2"],
code="np_modf(__in1, __out1, __out2)",
reduce=None,
initial=np.modf.identity),
ldexp=dict(
name="_numpy_ldexp_",
operator="Ldexp",
inputs=["__in1", "__in2"],
outputs=["__out"],
code="__out = ldexp(__in1, __in2)",
# NumPy apparently has np.ldexp.reduce, but for any kind of input array
# it returns "TypeError: No loop matching the specified signature and
# casting was found for ufunc ldexp". Considering that the method
# computes __in1 * 2 ** __in2, it is hard to define a reduction.
reduce=None,
initial=np.ldexp.identity),
frexp=dict(name="_numpy_frexp_",
operator="Frexp",
inputs=["__in1"],
outputs=["__out1", "__out2"],
code="np_frexp(__in1, __out1, __out2)",
reduce=None,
initial=np.frexp.identity),
floor=dict(name="_numpy_floor_",
operator="Floor",
inputs=["__in1"],
outputs=["__out"],
code="__out = floor(__in1)",
reduce=None,
initial=np.floor.identity),
ceil=dict(name="_numpy_ceil_",
operator="Ceil",
inputs=["__in1"],
outputs=["__out"],
code="__out = ceil(__in1)",
reduce=None,
initial=np.ceil.identity),
trunc=dict(name="_numpy_trunc_",
operator="Trunc",
inputs=["__in1"],
outputs=["__out"],
code="__out = trunc(__in1)",
reduce=None,
initial=np.trunc.identity),
)
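# Each entry in the `ufuncs` table above bundles what is needed to lower a ufunc
# call: a tasklet name, the AST operator it corresponds to (or None), the input
# and output connector names, the element-wise tasklet code, a reduction lambda
# used by the ufunc 'reduce' method (None when no reduction is meaningful), and
# the identity used as the initial value of a reduction. For example, `maximum`
# reduces with `lambda a, b: max(a, b)` starting from -inf, because
# `np.maximum.identity` is None.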
def _get_ufunc_impl(visitor: 'ProgramVisitor', ast_node: ast.Call, ufunc_name: str) -> Dict[str, Any]:
""" Retrieves the implementation details for a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param ufunc_name: Name of the ufunc
:raises DaCeSyntaxError: When the ufunc implementation is missing
"""
try:
return ufuncs[ufunc_name]
except KeyError:
raise mem_parser.DaceSyntaxError(visitor, ast_node,
"Missing implementation for NumPy ufunc {f}.".format(f=ufunc_name))
def _validate_ufunc_num_arguments(visitor: 'ProgramVisitor', ast_node: ast.Call, ufunc_name: str, num_inputs: int,
num_outputs: int, num_args: int):
""" Validates the number of positional arguments in a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param ufunc_name: Name of the ufunc
:param num_inputs: Number of ufunc inputs
:param num_outputs: Number of ufunc outputs
        :param num_args: Number of positional arguments in the ufunc call
:raises DaCeSyntaxError: When validation fails
"""
if num_args > num_inputs + num_outputs:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Invalid number of arguments in call to numpy.{f} "
"(expected a maximum of {i} input(s) and {o} output(s), "
"but a total of {a} arguments were given).".format(f=ufunc_name, i=num_inputs, o=num_outputs, a=num_args))
def _validate_ufunc_inputs(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, ufunc_name: str, num_inputs: int,
num_args: int, args: Sequence[UfuncInput]) -> List[UfuncInput]:
""" Validates the number of type of inputs in a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param ufunc_name: Name of the ufunc
        :param num_inputs: Number of ufunc inputs
        :param num_args: Number of positional arguments in the ufunc call
        :param args: Positional arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of input datanames and constants
"""
# Validate number of inputs
if num_args > num_inputs:
# Assume that the first "num_inputs" arguments are inputs
inputs = args[:num_inputs]
elif num_args < num_inputs:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Invalid number of arguments in call to numpy.{f} "
"(expected {e} inputs, but {a} were given).".format(f=ufunc_name, e=num_inputs, a=num_args))
else:
inputs = args
if isinstance(inputs, (list, tuple)):
inputs = list(inputs)
else:
inputs = [inputs]
# Validate type of inputs
for arg in inputs:
if isinstance(arg, str) and arg in sdfg.arrays.keys():
pass
elif isinstance(arg, (Number, sp.Basic)):
pass
else:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Input arguments in call to numpy.{f} must be of dace.data.Data "
"type or numerical/boolean constants (invalid argument {a})".format(f=ufunc_name, a=arg))
return inputs
def _validate_ufunc_outputs(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, ufunc_name: str, num_inputs: int,
num_outputs: int, num_args: int, args: Sequence[UfuncInput],
kwargs: Dict[str, Any]) -> List[UfuncOutput]:
""" Validates the number of type of outputs in a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param ufunc_name: Name of the ufunc
:param num_inputs: Number of ufunc inputs
:param num_outputs: Number of ufunc outputs
:param args: Positional arguments of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of output datanames and None
"""
# Validate number of outputs
num_pos_outputs = num_args - num_inputs
if num_pos_outputs == 0 and "out" not in kwargs.keys():
outputs = [None] * num_outputs
elif num_pos_outputs > 0 and "out" in kwargs.keys():
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "You cannot specify 'out' in call to numpy.{f} as both a positional"
" and keyword argument (positional {p}, keyword {w}).".format(f=ufunc_name,
p=args[num_outputs, :],
k=kwargs['out']))
elif num_pos_outputs > 0:
outputs = list(args[num_inputs:])
# TODO: Support the following undocumented NumPy behavior?
            # NumPy allows specifying fewer than `expected_num_outputs` outputs as
# positional arguments. For example, `np.divmod` has 2 outputs, the
# quotient and the remainder. `np.divmod(A, B, C)` works, but
# `np.divmod(A, B, out=C)` or `np.divmod(A, B, out=(C))` doesn't.
# In the case of output as a positional argument, C will be set to
# the quotient of the floor division, while a new array will be
# generated for the remainder.
else:
outputs = kwargs["out"]
if isinstance(outputs, (list, tuple)):
outputs = list(outputs)
else:
outputs = [outputs]
if len(outputs) != num_outputs:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Invalid number of arguments in call to numpy.{f} "
"(expected {e} outputs, but {a} were given).".format(f=ufunc_name, e=num_outputs, a=len(outputs)))
# Validate outputs
for arg in outputs:
if arg is None:
pass
elif isinstance(arg, str) and arg in sdfg.arrays.keys():
pass
else:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Return arguments in call to numpy.{f} must be of "
"dace.data.Data type.".format(f=ufunc_name))
return outputs
def _validate_where_kword(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, ufunc_name: str,
kwargs: Dict[str, Any]) -> Tuple[bool, Union[str, bool]]:
""" Validates the 'where' keyword argument passed to a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param ufunc_name: Name of the ufunc
        :param kwargs: Keyword arguments of the ufunc call
:raises DaceSyntaxError: When validation fails
:returns: Tuple of a boolean value indicating whether the 'where'
keyword is defined, and the validated 'where' value
"""
has_where = False
where = None
if 'where' in kwargs.keys():
where = kwargs['where']
if isinstance(where, str) and where in sdfg.arrays.keys():
has_where = True
elif isinstance(where, (bool, np.bool_)):
has_where = True
elif isinstance(where, (list, tuple)):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Values for the 'where' keyword that are a sequence of boolean "
" constants are unsupported. Please, pass these values to the "
" {n} call through a DaCe boolean array.".format(n=ufunc_name))
else:
# NumPy defaults to "where=True" for invalid values for the keyword
pass
return has_where, where
def _validate_shapes(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, ufunc_name: str,
inputs: List[UfuncInput],
outputs: List[UfuncOutput]) -> Tuple[Shape, Tuple[Tuple[str, str], ...], str, List[str]]:
""" Validates the data shapes of inputs and outputs to a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param ufunc_name: Name of the ufunc
:param inputs: Inputs of the ufunc call
:param outputs: Outputs of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: Tuple with the output shape, the map, output and input indices
"""
shapes = []
for arg in inputs + outputs:
if isinstance(arg, str):
array = sdfg.arrays[arg]
shapes.append(array.shape)
else:
shapes.append([])
try:
result = _broadcast(shapes)
except SyntaxError as e:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Shape validation in numpy.{f} call failed. The following error "
"occured : {m}".format(f=ufunc_name, m=str(e)))
return result
def _broadcast(shapes: Sequence[Shape]) -> Tuple[Shape, Tuple[Tuple[str, str], ...], str, List[str]]:
""" Applies the NumPy ufunc brodacsting rules in a sequence of data shapes
(see https://numpy.org/doc/stable/reference/ufuncs.html#broadcasting).
:param shapes: Sequence (list, tuple) of data shapes
:raises SyntaxError: When broadcasting fails
:returns: Tuple with the output shape, the map, output and input indices
"""
map_lengths = dict()
output_indices = []
input_indices = [[] for _ in shapes]
ndims = [len(shape) for shape in shapes]
max_i = max(ndims)
def get_idx(i):
return "__i" + str(max_i - i - 1)
def to_string(idx):
return ", ".join(reversed(idx))
reversed_shapes = [reversed(shape) for shape in shapes]
for i, dims in enumerate(itertools.zip_longest(*reversed_shapes)):
output_indices.append(get_idx(i))
not_none_dims = [d for d in dims if d is not None]
# Per NumPy broadcasting rules, we need to find the largest dimension.
# However, `max_dim = max(not_none_dims)` does not work with symbols.
# Therefore, we sequentially check every not-none dimension.
# Symbols are assumed to be larger than constants.
# This will not work properly otherwise.
# If more than 1 (different) symbols are found, then this fails, because
# we cannot know which will have the greater size.
# NOTE: This is a compromise. NumPy broadcasting depends on knowing
# the exact array sizes. However, symbolic sizes are not known at this
# point.
max_dim = 0
for d in not_none_dims:
if isinstance(max_dim, Number):
if isinstance(d, Number):
max_dim = max(max_dim, d)
elif symbolic.issymbolic(d):
max_dim = d
else:
raise NotImplementedError
elif symbolic.issymbolic(max_dim):
if isinstance(d, Number):
pass
elif symbolic.issymbolic(d):
if max_dim != d:
raise NotImplementedError
else:
raise NotImplementedError
map_lengths[get_idx(i)] = max_dim
for j, d in enumerate(dims):
if d is None:
pass
elif d == 1:
input_indices[j].append('0')
elif d == max_dim:
input_indices[j].append(get_idx(i))
else:
raise SyntaxError("Operands could not be broadcast together with shapes {}.".format(','.join(
str(shapes))))
out_shape = tuple(reversed([map_lengths[idx] for idx in output_indices]))
map_indices = [(k, "0:" + str(map_lengths[k])) for k in reversed(output_indices)]
output_indices = to_string(output_indices)
input_indices = [to_string(idx) for idx in input_indices]
if not out_shape:
out_shape = (1, )
output_indices = "0"
return out_shape, map_indices, output_indices, input_indices
def _create_output(sdfg: SDFG,
inputs: List[UfuncInput],
outputs: List[UfuncOutput],
output_shape: Shape,
output_dtype: Union[dtypes.typeclass, List[dtypes.typeclass]],
storage: dtypes.StorageType = None,
force_scalar: bool = False) -> List[UfuncOutput]:
""" Creates output data for storing the result of a NumPy ufunc call.
:param sdfg: SDFG object
:param inputs: Inputs of the ufunc call
:param outputs: Outputs of the ufunc call
:param output_shape: Shape of the output data
:param output_dtype: Datatype of the output data
:param storage: Storage type of the output data
:param force_scalar: If True and output shape is (1,) then output
becomes a dace.data.Scalar, regardless of the data-type of the inputs
:returns: New outputs of the ufunc call
"""
# Check if the result is scalar
is_output_scalar = True
for arg in inputs:
if isinstance(arg, str) and arg in sdfg.arrays.keys():
datadesc = sdfg.arrays[arg]
# If storage is not set, then choose the storage of the first
# data input.
if not storage:
storage = datadesc.storage
# TODO: What about streams?
if not isinstance(datadesc, data.Scalar):
is_output_scalar = False
break
# Set storage
storage = storage or dtypes.StorageType.Default
# Validate datatypes
if isinstance(output_dtype, (list, tuple)):
if len(output_dtype) == 1:
datatypes = [output_dtype[0]] * len(outputs)
elif len(output_dtype) == len(outputs):
datatypes = output_dtype
else:
raise ValueError("Missing output datatypes")
else:
datatypes = [output_dtype] * len(outputs)
# Create output data (if needed)
for i, (arg, datatype) in enumerate(zip(outputs, datatypes)):
if arg is None:
if (len(output_shape) == 1 and output_shape[0] == 1 and (is_output_scalar or force_scalar)):
output_name = sdfg.temp_data_name()
                sdfg.add_scalar(output_name, datatype, transient=True, storage=storage)
outputs[i] = output_name
else:
outputs[i], _ = sdfg.add_temp_transient(output_shape, datatype)
return outputs
def _set_tasklet_params(ufunc_impl: Dict[str, Any],
inputs: List[UfuncInput],
casting: List[dtypes.typeclass] = None) -> Dict[str, Any]:
""" Sets the tasklet parameters for a NumPy ufunc call.
:param ufunc_impl: Information on how the ufunc must be implemented
:param inputs: Inputs of the ufunc call
:returns: Dictionary with the (1) tasklet name, (2) input connectors,
(3) output connectors, and (4) tasklet code
"""
# (Deep) copy default tasklet parameters from the ufunc_impl dictionary
name = ufunc_impl['name']
inp_connectors = copy.deepcopy(ufunc_impl['inputs'])
out_connectors = copy.deepcopy(ufunc_impl['outputs'])
code = ufunc_impl['code']
# Remove input connectors related to constants
# and fix constants/symbols in the tasklet code
for i, arg in reversed(list(enumerate(inputs))):
inp_conn = inp_connectors[i]
if casting and casting[i]:
repl = "{c}({o})".format(c=str(casting[i]).replace('::', '.'), o=inp_conn)
code = code.replace(inp_conn, repl)
if isinstance(arg, (Number, sp.Basic)):
inp_conn = inp_connectors[i]
code = code.replace(inp_conn, astutils.unparse(arg))
inp_connectors.pop(i)
return dict(name=name, inputs=inp_connectors, outputs=out_connectors, code=code)
def _create_subgraph(visitor: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
inputs: List[UfuncInput],
outputs: List[UfuncOutput],
map_indices: Tuple[str, str],
input_indices: List[str],
output_indices: str,
output_shape: Shape,
tasklet_params: Dict[str, Any],
has_where: bool = False,
where: Union[str, bool] = None):
""" Creates the subgraph that implements a NumPy ufunc call.
        :param visitor: ProgramVisitor object handling the ufunc call
        :param sdfg: SDFG object
:param state: SDFG State object
:param inputs: Inputs of the ufunc call
:param outputs: Outputs of the ufunc call
:param map_indices: Map (if needed) indices
:param input_indices: Input indices for inner-most memlets
:param output_indices: Output indices for inner-most memlets
:param output_shape: Shape of the output
:param tasklet_params: Dictionary with the tasklet parameters
:param has_where: True if the 'where' keyword is set
:param where: Keyword 'where' value
"""
# Create subgraph
if list(output_shape) == [1]:
# No map needed
if has_where:
if isinstance(where, (bool, np.bool_)):
if where == True:
pass
elif where == False:
return
elif isinstance(where, str) and where in sdfg.arrays.keys():
cond_state = state
where_data = sdfg.arrays[where]
if not isinstance(where_data, data.Scalar):
name = sdfg.temp_data_name()
sdfg.add_scalar(name, where_data.dtype, transient=True)
r = cond_state.add_read(where)
w = cond_state.add_write(name)
cond_state.add_nedge(r, w, dace.Memlet("{}[0]".format(r)))
                else:
                    # If 'where' is already a scalar, use its data name directly as the condition
                    name = where
                true_state = sdfg.add_state(label=cond_state.label + '_true')
state = true_state
visitor.last_state = state
cond = name
cond_else = 'not ({})'.format(cond)
sdfg.add_edge(cond_state, true_state, dace.InterstateEdge(cond))
tasklet = state.add_tasklet(**tasklet_params)
inp_conn_idx = 0
for arg in inputs:
if isinstance(arg, str) and arg in sdfg.arrays.keys():
inp_node = state.add_read(arg)
state.add_edge(inp_node, None, tasklet, tasklet_params['inputs'][inp_conn_idx],
dace.Memlet.from_array(arg, sdfg.arrays[arg]))
inp_conn_idx += 1
for i, arg in enumerate(outputs):
if isinstance(arg, str) and arg in sdfg.arrays.keys():
out_node = state.add_write(arg)
state.add_edge(tasklet, tasklet_params['outputs'][i], out_node, None,
dace.Memlet.from_array(arg, sdfg.arrays[arg]))
if has_where and isinstance(where, str) and where in sdfg.arrays.keys():
visitor._add_state(label=cond_state.label + '_true')
sdfg.add_edge(cond_state, visitor.last_state, dace.InterstateEdge(cond_else))
else:
# Map needed
if has_where:
if isinstance(where, (bool, np.bool_)):
if where == True:
pass
elif where == False:
return
elif isinstance(where, str) and where in sdfg.arrays.keys():
nested_sdfg = dace.SDFG(state.label + "_where")
nested_sdfg_inputs = dict()
nested_sdfg_outputs = dict()
nested_sdfg._temp_transients = sdfg._temp_transients
for idx, arg in enumerate(inputs + [where]):
if not (isinstance(arg, str) and arg in sdfg.arrays.keys()):
continue
arg_data = sdfg.arrays[arg]
conn_name = nested_sdfg.temp_data_name()
nested_sdfg_inputs[arg] = (conn_name, input_indices[idx])
if isinstance(arg_data, data.Scalar):
nested_sdfg.add_scalar(conn_name, arg_data.dtype)
elif isinstance(arg_data, data.Array):
nested_sdfg.add_array(conn_name, [1], arg_data.dtype)
else:
raise NotImplementedError
for arg in outputs:
arg_data = sdfg.arrays[arg]
conn_name = nested_sdfg.temp_data_name()
nested_sdfg_outputs[arg] = (conn_name, output_indices)
if isinstance(arg_data, data.Scalar):
nested_sdfg.add_scalar(conn_name, arg_data.dtype)
elif isinstance(arg_data, data.Array):
nested_sdfg.add_array(conn_name, [1], arg_data.dtype)
else:
raise NotImplementedError
cond_state = nested_sdfg.add_state(label=state.label + "_where_cond", is_start_state=True)
where_data = sdfg.arrays[where]
if isinstance(where_data, data.Scalar):
                name = nested_sdfg_inputs[where][0]
elif isinstance(where_data, data.Array):
name = nested_sdfg.temp_data_name()
nested_sdfg.add_scalar(name, where_data.dtype, transient=True)
r = cond_state.add_read(nested_sdfg_inputs[where][0])
w = cond_state.add_write(name)
cond_state.add_nedge(r, w, dace.Memlet("{}[0]".format(r)))
sdfg._temp_transients = nested_sdfg._temp_transients
true_state = nested_sdfg.add_state(label=cond_state.label + '_where_true')
cond = name
cond_else = 'not ({})'.format(cond)
nested_sdfg.add_edge(cond_state, true_state, dace.InterstateEdge(cond))
tasklet = true_state.add_tasklet(**tasklet_params)
idx = 0
for arg in inputs:
if isinstance(arg, str) and arg in sdfg.arrays.keys():
inp_name, _ = nested_sdfg_inputs[arg]
inp_data = nested_sdfg.arrays[inp_name]
inp_node = true_state.add_read(inp_name)
true_state.add_edge(inp_node, None, tasklet, tasklet_params['inputs'][idx],
dace.Memlet.from_array(inp_name, inp_data))
idx += 1
for i, arg in enumerate(outputs):
if isinstance(arg, str) and arg in sdfg.arrays.keys():
out_name, _ = nested_sdfg_outputs[arg]
out_data = nested_sdfg.arrays[out_name]
out_node = true_state.add_write(out_name)
true_state.add_edge(tasklet, tasklet_params['outputs'][i], out_node, None,
dace.Memlet.from_array(out_name, out_data))
false_state = nested_sdfg.add_state(label=state.label + '_where_false')
nested_sdfg.add_edge(cond_state, false_state, dace.InterstateEdge(cond_else))
nested_sdfg.add_edge(true_state, false_state, dace.InterstateEdge())
codenode = state.add_nested_sdfg(nested_sdfg, sdfg, set([n for n, _ in nested_sdfg_inputs.values()]),
set([n for n, _ in nested_sdfg_outputs.values()]))
me, mx = state.add_map(state.label + '_map', map_indices)
for arg in inputs + [where]:
if not (isinstance(arg, str) and arg in sdfg.arrays.keys()):
continue
n = state.add_read(arg)
conn, idx = nested_sdfg_inputs[arg]
state.add_memlet_path(n,
me,
codenode,
memlet=dace.Memlet("{a}[{i}]".format(a=n, i=idx)),
dst_conn=conn)
for arg in outputs:
n = state.add_write(arg)
conn, idx = nested_sdfg_outputs[arg]
state.add_memlet_path(codenode,
mx,
n,
memlet=dace.Memlet("{a}[{i}]".format(a=n, i=idx)),
src_conn=conn)
return
input_memlets = dict()
inp_conn_idx = 0
for arg, idx in zip(inputs, input_indices):
if isinstance(arg, str) and arg in sdfg.arrays.keys():
conn = tasklet_params['inputs'][inp_conn_idx]
input_memlets[conn] = Memlet.simple(arg, idx)
inp_conn_idx += 1
output_memlets = {
out_conn: Memlet.simple(arg, output_indices)
for arg, out_conn in zip(outputs, tasklet_params['outputs'])
}
state.add_mapped_tasklet(tasklet_params['name'],
map_indices,
input_memlets,
tasklet_params['code'],
output_memlets,
external_edges=True)
def _flatten_args(args: Sequence[UfuncInput]) -> Sequence[UfuncInput]:
""" Flattens arguments of a NumPy ufunc. This is useful in cases where
one of the arguments is the result of another operation or ufunc, which
may be a list of Dace data.
"""
flat_args = []
for arg in args:
if isinstance(arg, list):
flat_args.extend(arg)
else:
flat_args.append(arg)
return flat_args
@oprepo.replaces_ufunc('ufunc')
def implement_ufunc(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, state: SDFGState, ufunc_name: str,
args: Sequence[UfuncInput], kwargs: Dict[str, Any]) -> List[UfuncOutput]:
""" Implements a NumPy ufunc.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param state: SDFG State object
:param ufunc_name: Name of the ufunc
:param args: Positional arguments of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of output datanames
"""
# Flatten arguments
args = _flatten_args(args)
# Get the ufunc implementation details
ufunc_impl = _get_ufunc_impl(visitor, ast_node, ufunc_name)
# Validate number of arguments, inputs, and outputs
num_inputs = len(ufunc_impl['inputs'])
num_outputs = len(ufunc_impl['outputs'])
num_args = len(args)
_validate_ufunc_num_arguments(visitor, ast_node, ufunc_name, num_inputs, num_outputs, num_args)
inputs = _validate_ufunc_inputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_args, args)
outputs = _validate_ufunc_outputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_outputs, num_args, args,
kwargs)
# Validate 'where' keyword
has_where, where = _validate_where_kword(visitor, ast_node, sdfg, ufunc_name, kwargs)
# Validate data shapes and apply NumPy broadcasting rules
inp_shapes = copy.deepcopy(inputs)
if has_where:
inp_shapes += [where]
(out_shape, map_indices, out_indices, inp_indices) = _validate_shapes(visitor, ast_node, sdfg, ufunc_name,
inp_shapes, outputs)
# Infer result type
result_type, casting = _result_type(
[sdfg.arrays[arg] if isinstance(arg, str) and arg in sdfg.arrays else arg for arg in inputs],
ufunc_impl['operator'])
if 'dtype' in kwargs.keys():
dtype = kwargs['dtype']
if dtype in dtypes.DTYPE_TO_TYPECLASS.keys():
result_type = dtype
# Create output data (if needed)
outputs = _create_output(sdfg, inputs, outputs, out_shape, result_type)
# Set tasklet parameters
tasklet_params = _set_tasklet_params(ufunc_impl, inputs, casting=casting)
# Create subgraph
_create_subgraph(visitor,
sdfg,
state,
inputs,
outputs,
map_indices,
inp_indices,
out_indices,
out_shape,
tasklet_params,
has_where=has_where,
where=where)
return outputs
def _validate_keepdims_kword(visitor: 'ProgramVisitor', ast_node: ast.Call, ufunc_name: str, kwargs: Dict[str,
Any]) -> bool:
""" Validates the 'keepdims' keyword argument of a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param ufunc_name: Name of the ufunc
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: Boolean value of the 'keepdims' keyword argument
"""
keepdims = False
if 'keepdims' in kwargs.keys():
keepdims = kwargs['keepdims']
if not isinstance(keepdims, (Integral, bool, np.bool_)):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Integer or boolean value expected for keyword argument "
"'keepdims' in reduction operation {f} (got {v}).".format(f=ufunc_name, v=keepdims))
if not isinstance(keepdims, (bool, np.bool_)):
keepdims = bool(keepdims)
return keepdims
def _validate_axis_kword(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, inputs: List[UfuncInput],
kwargs: Dict[str, Any], keepdims: bool) -> Tuple[Tuple[int, ...], Union[Shape, None], Shape]:
""" Validates the 'axis' keyword argument of a NumPy ufunc call.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param inputs: Inputs of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:param keepdims: Boolean value of the 'keepdims' keyword argument
:raises DaCeSyntaxError: When validation fails
:returns: The value of the 'axis' keyword argument, the intermediate
data shape (if needed), and the expected output shape
"""
# Validate 'axis' keyword
axis = (0, )
if isinstance(inputs[0], str) and inputs[0] in sdfg.arrays.keys():
inp_shape = sdfg.arrays[inputs[0]].shape
else:
inp_shape = [1]
if 'axis' in kwargs.keys():
# Set to (0, 1, 2, ...) if the keyword arg value is None
axis = kwargs['axis'] or tuple(range(len(inp_shape)))
if axis is not None and not isinstance(axis, (tuple, list)):
axis = (axis, )
if axis is not None:
axis = tuple(pystr_to_symbolic(a) for a in axis)
axis = tuple(normalize_axes(axis, len(inp_shape)))
if len(axis) > len(inp_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Axis {a} is out of bounds for data of dimension {d}".format(a=axis, d=inp_shape))
for a in axis:
if a >= len(inp_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Axis {a} is out of bounds for data of dimension {d}".format(a=a, d=inp_shape))
if keepdims:
intermediate_shape = [d for i, d in enumerate(inp_shape) if i not in axis]
expected_out_shape = [d if i not in axis else 1 for i, d in enumerate(inp_shape)]
else:
intermediate_shape = None
expected_out_shape = [d for i, d in enumerate(inp_shape) if i not in axis]
expected_out_shape = expected_out_shape or [1]
else:
if keepdims:
intermediate_shape = [1]
expected_out_shape = [1] * len(inp_shape)
else:
intermediate_shape = None
expected_out_shape = [1]
return axis, intermediate_shape, expected_out_shape
@oprepo.replaces_ufunc('reduce')
def implement_ufunc_reduce(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, state: SDFGState, ufunc_name: str,
args: Sequence[UfuncInput], kwargs: Dict[str, Any]) -> List[UfuncOutput]:
""" Implements the 'reduce' method of a NumPy ufunc.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param state: SDFG State object
:param ufunc_name: Name of the ufunc
:param args: Positional arguments of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of output datanames
"""
# Flatten arguments
args = _flatten_args(args)
# Get the ufunc implementation details
ufunc_impl = _get_ufunc_impl(visitor, ast_node, ufunc_name)
# Validate number of arguments, inputs, and outputs
num_inputs = 1
num_outputs = 1
num_args = len(args)
_validate_ufunc_num_arguments(visitor, ast_node, ufunc_name, num_inputs, num_outputs, num_args)
inputs = _validate_ufunc_inputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_args, args)
outputs = _validate_ufunc_outputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_outputs, num_args, args,
kwargs)
# Validate 'keepdims' keyword
keepdims = _validate_keepdims_kword(visitor, ast_node, ufunc_name, kwargs)
# Validate 'axis' keyword
axis, intermediate_shape, expected_out_shape = _validate_axis_kword(visitor, ast_node, sdfg, inputs, kwargs,
keepdims)
# Validate 'where' keyword
# Throw a warning that it is currently unsupported.
if 'where' in kwargs.keys():
warnings.warn("Keyword argument 'where' in 'reduce' method of NumPy "
"ufunc calls is unsupported. It will be ignored.")
# Validate data shapes and apply NumPy broadcasting rules
# In the case of reduce we may only validate the broadcasting of the
# single input with the 'where' value. Since 'where' is currently
# unsupported, only validate output shape.
# TODO: Maybe add special error when 'keepdims' is True
if isinstance(outputs[0], str) and outputs[0] in sdfg.arrays.keys():
out_shape = sdfg.arrays[outputs[0]].shape
if len(out_shape) < len(expected_out_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Output parameter for reduction operation {f} does not have "
"enough dimensions (output shape {o}, expected shape {e}).".format(f=ufunc_name,
o=out_shape,
e=expected_out_shape))
if len(out_shape) > len(expected_out_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Output parameter for reduction operation {f} has too many "
"dimensions (output shape {o}, expected shape {e}).".format(f=ufunc_name,
o=out_shape,
e=expected_out_shape))
if (list(out_shape) != list(expected_out_shape)):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Output parameter for reduction operation {f} has non-reduction"
" dimension not equal to the input one (output shape {o}, "
"expected shape {e}).".format(f=ufunc_name, o=out_shape, e=expected_out_shape))
else:
out_shape = expected_out_shape
# No casting needed
arg = inputs[0]
if isinstance(arg, str):
datadesc = sdfg.arrays[arg]
result_type = datadesc.dtype
elif isinstance(arg, (Number, np.bool_)):
result_type = dtypes.DTYPE_TO_TYPECLASS[type(arg)]
elif isinstance(arg, sp.Basic):
result_type = _sym_type(arg)
# Create output data (if needed)
outputs = _create_output(sdfg, inputs, outputs, out_shape, result_type, force_scalar=True)
if keepdims:
if (len(intermediate_shape) == 1 and intermediate_shape[0] == 1):
intermediate_name = sdfg.temp_data_name()
sdfg.add_scalar(intermediate_name, result_type, transient=True)
else:
intermediate_name, _ = sdfg.add_temp_transient(intermediate_shape, result_type)
else:
intermediate_name = outputs[0]
# Validate 'initial' keyword
# This is set to be ufunc.identity, when it exists
initial = ufunc_impl['initial']
if 'initial' in kwargs.keys():
# NumPy documentation says that when 'initial' is set to None,
# then the first element of the reduction is used. However, it seems
# that when 'initial' is None and the ufunc has 'identity', then
# ufunc.identity is the default.
initial = kwargs['initial'] or initial
if initial is None:
if isinstance(inputs[0], str) and inputs[0] in sdfg.arrays.keys():
inpdata = sdfg.arrays[inputs[0]]
            # If the input data has more than one dimension and 'initial'
# is None, then NumPy uses a different 'initial' value for every
# non-reduced dimension.
if isinstance(inpdata, data.Array):
state.add_mapped_tasklet(
name=state.label + "_reduce_initial",
map_ranges={
"__i{i}".format(i=i): "0:{s}".format(s=s)
for i, s in enumerate(inpdata.shape) if i not in axis
},
inputs={
"__inp":
dace.Memlet("{a}[{i}]".format(a=inputs[0],
i=','.join([
"0" if i in axis else "__i{i}".format(i=i)
for i in range(len(inpdata.shape))
])))
},
outputs={
"__out":
dace.Memlet("{a}[{i}]".format(
a=intermediate_name,
i=','.join(["__i{i}".format(i=i) for i in range(len(inpdata.shape)) if i not in axis])))
},
code="__out = __inp",
external_edges=True)
else:
r = state.add_read(inputs[0])
w = state.add_write(intermediate_name)
                state.add_nedge(r, w, dace.Memlet.from_array(inputs[0], inpdata))
state = visitor._add_state(state.label + 'b')
else:
initial = intermediate_name
# Special case for infinity
if np.isinf(initial):
if np.sign(initial) < 0:
initial = dtypes.min_value(result_type)
else:
initial = dtypes.max_value(result_type)
# Create subgraph
if isinstance(inputs[0], str) and inputs[0] in sdfg.arrays.keys():
_reduce(visitor, sdfg, state, ufunc_impl['reduce'], inputs[0], intermediate_name, axis=axis, identity=initial)
else:
tasklet = state.add_tasklet(state.label + "_tasklet", {}, {'__out'}, "__out = {}".format(inputs[0]))
out_node = state.add_write(intermediate_name)
datadesc = sdfg.arrays[intermediate_name]
state.add_edge(tasklet, '__out', out_node, None, dace.Memlet.from_array(intermediate_name, datadesc))
if keepdims:
intermediate_node = None
for n in state.nodes():
if isinstance(n, nodes.AccessNode) and n.data == intermediate_name:
intermediate_node = n
break
if not intermediate_node:
raise ValueError("Keyword argument 'keepdims' is True, but "
"intermediate access node was not found.")
out_node = state.add_write(outputs[0])
state.add_nedge(intermediate_node, out_node, dace.Memlet.from_array(outputs[0], sdfg.arrays[outputs[0]]))
return outputs
@oprepo.replaces_ufunc('accumulate')
def implement_ufunc_accumulate(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, state: SDFGState,
ufunc_name: str, args: Sequence[UfuncInput], kwargs: Dict[str,
Any]) -> List[UfuncOutput]:
""" Implements the 'accumulate' method of a NumPy ufunc.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param state: SDFG State object
:param ufunc_name: Name of the ufunc
:param args: Positional arguments of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of output datanames
"""
# Flatten arguments
args = _flatten_args(args)
# Get the ufunc implementation details
ufunc_impl = _get_ufunc_impl(visitor, ast_node, ufunc_name)
# Validate number of arguments, inputs, and outputs
num_inputs = 1
num_outputs = 1
num_args = len(args)
_validate_ufunc_num_arguments(visitor, ast_node, ufunc_name, num_inputs, num_outputs, num_args)
inputs = _validate_ufunc_inputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_args, args)
outputs = _validate_ufunc_outputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_outputs, num_args, args,
kwargs)
# No casting needed
arg = inputs[0]
if isinstance(arg, str) and arg in sdfg.arrays.keys():
datadesc = sdfg.arrays[arg]
if not isinstance(datadesc, data.Array):
raise mem_parser.DaceSyntaxError(visitor, ast_node,
"Cannot accumulate on a dace.data.Scalar or dace.data.Stream.")
out_shape = datadesc.shape
result_type = datadesc.dtype
else:
raise mem_parser.DaceSyntaxError(visitor, ast_node, "Can accumulate only on a dace.data.Array.")
# Validate 'axis' keyword argument
axis = 0
if 'axis' in kwargs.keys():
axis = kwargs['axis'] or axis
if isinstance(axis, (list, tuple)) and len(axis) == 1:
axis = axis[0]
if not isinstance(axis, Integral):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Value of keyword argument 'axis' in 'accumulate' method of {f}"
" must be an integer (value {v}).".format(f=ufunc_name, v=axis))
if axis >= len(out_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Axis {a} is out of bounds for dace.data.Array of dimension "
"{l}".format(a=axis, l=len(out_shape)))
# Normalize negative axis
axis = normalize_axes([axis], len(out_shape))[0]
# Create output data (if needed)
outputs = _create_output(sdfg, inputs, outputs, out_shape, result_type)
# Create subgraph
shape = datadesc.shape
map_range = {"__i{}".format(i): "0:{}".format(s) for i, s in enumerate(shape) if i != axis}
input_idx = ','.join(["__i{}".format(i) if i != axis else "0:{}".format(shape[i]) for i in range(len(shape))])
output_idx = ','.join(["__i{}".format(i) if i != axis else "0:{}".format(shape[i]) for i in range(len(shape))])
nested_sdfg = dace.SDFG(state.label + "_for_loop")
nested_sdfg._temp_transients = sdfg._temp_transients
inpconn = nested_sdfg.temp_data_name()
outconn = nested_sdfg.temp_data_name()
shape = [datadesc.shape[axis]]
strides = [datadesc.strides[axis]]
nested_sdfg.add_array(inpconn, shape, result_type, strides=strides)
nested_sdfg.add_array(outconn, shape, result_type, strides=strides)
init_state = nested_sdfg.add_state(label="init")
r = init_state.add_read(inpconn)
w = init_state.add_write(outconn)
init_state.add_nedge(r, w, dace.Memlet("{a}[{i}] -> {oi}".format(a=inpconn, i='0', oi='0')))
body_state = nested_sdfg.add_state(label="body")
r1 = body_state.add_read(inpconn)
r2 = body_state.add_read(outconn)
w = body_state.add_write(outconn)
t = body_state.add_tasklet(name=state.label + "_for_loop_tasklet",
inputs=ufunc_impl['inputs'],
outputs=ufunc_impl['outputs'],
code=ufunc_impl['code'])
loop_idx = "__i{}".format(axis)
loop_idx_m1 = "__i{} - 1".format(axis)
body_state.add_edge(r1, None, t, '__in1', dace.Memlet("{a}[{i}]".format(a=inpconn, i=loop_idx)))
body_state.add_edge(r2, None, t, '__in2', dace.Memlet("{a}[{i}]".format(a=outconn, i=loop_idx_m1)))
body_state.add_edge(t, '__out', w, None, dace.Memlet("{a}[{i}]".format(a=outconn, i=loop_idx)))
init_expr = str(1)
cond_expr = "__i{i} < {s}".format(i=axis, s=shape[0])
incr_expr = "__i{} + 1".format(axis)
nested_sdfg.add_loop(init_state, body_state, None, loop_idx, init_expr, cond_expr, incr_expr)
sdfg._temp_transients = nested_sdfg._temp_transients
r = state.add_read(inputs[0])
w = state.add_write(outputs[0])
codenode = state.add_nested_sdfg(nested_sdfg, sdfg, {inpconn}, {outconn})
me, mx = state.add_map(state.label + '_map', map_range)
state.add_memlet_path(r,
me,
codenode,
memlet=dace.Memlet("{a}[{i}]".format(a=inputs[0], i=input_idx)),
dst_conn=inpconn)
state.add_memlet_path(codenode,
mx,
w,
memlet=dace.Memlet("{a}[{i}]".format(a=outputs[0], i=output_idx)),
src_conn=outconn)
return outputs
@oprepo.replaces_ufunc('outer')
def implement_ufunc_outer(visitor: 'ProgramVisitor', ast_node: ast.Call, sdfg: SDFG, state: SDFGState, ufunc_name: str,
args: Sequence[UfuncInput], kwargs: Dict[str, Any]) -> List[UfuncOutput]:
""" Implements the 'outer' method of a NumPy ufunc.
:param visitor: ProgramVisitor object handling the ufunc call
:param ast_node: AST node corresponding to the ufunc call
:param sdfg: SDFG object
:param state: SDFG State object
:param ufunc_name: Name of the ufunc
:param args: Positional arguments of the ufunc call
:param kwargs: Keyword arguments of the ufunc call
:raises DaCeSyntaxError: When validation fails
:returns: List of output datanames
"""
# Flatten arguments
args = _flatten_args(args)
# Get the ufunc implementation details
ufunc_impl = _get_ufunc_impl(visitor, ast_node, ufunc_name)
# Validate number of arguments, inputs, and outputs
num_inputs = len(ufunc_impl['inputs'])
num_outputs = len(ufunc_impl['outputs'])
num_args = len(args)
_validate_ufunc_num_arguments(visitor, ast_node, ufunc_name, num_inputs, num_outputs, num_args)
inputs = _validate_ufunc_inputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_args, args)
outputs = _validate_ufunc_outputs(visitor, ast_node, sdfg, ufunc_name, num_inputs, num_outputs, num_args, args,
kwargs)
# Validate 'where' keyword
has_where, where = _validate_where_kword(visitor, ast_node, sdfg, ufunc_name, kwargs)
# Validate data shapes
out_shape = []
map_vars = []
map_range = dict()
input_indices = []
output_idx = None
for i, arg in enumerate(inputs):
if isinstance(arg, str) and arg in sdfg.arrays.keys():
datadesc = sdfg.arrays[arg]
if isinstance(datadesc, data.Scalar):
input_idx = '0'
elif isinstance(datadesc, data.Array):
shape = datadesc.shape
out_shape.extend(shape)
map_vars.extend(["__i{i}_{j}".format(i=i, j=j) for j in range(len(shape))])
map_range.update({"__i{i}_{j}".format(i=i, j=j): "0:{}".format(sz) for j, sz in enumerate(shape)})
input_idx = ','.join(["__i{i}_{j}".format(i=i, j=j) for j in range(len(shape))])
if output_idx:
output_idx = ','.join([output_idx, input_idx])
else:
output_idx = input_idx
else:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Unsuported data type {t} in 'outer' method of NumPy ufunc "
"{f}.".format(t=type(datadesc), f=ufunc_name))
elif isinstance(arg, (Number, sp.Basic)):
input_idx = None
input_indices.append(input_idx)
if has_where and not isinstance(where, (bool, np.bool_)):
where_shape = sdfg.arrays[where].shape
try:
bcast_out_shape, _, _, bcast_inp_indices = _broadcast([out_shape, where_shape])
except SyntaxError:
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "'where' shape {w} could not be broadcast together with 'out' "
"shape {o}.".format(w=where_shape, o=out_shape))
if list(bcast_out_shape) != list(out_shape):
raise mem_parser.DaceSyntaxError(
visitor, ast_node, "Broadcasting 'where' shape {w} together with expected 'out' "
"shape {o} resulted in a different output shape {no}. This is "
"currently unsupported.".format(w=where_shape, o=out_shape, no=bcast_out_shape))
where_idx = bcast_inp_indices[1]
for i in range(len(out_shape)):
where_idx = where_idx.replace("__i{}".format(i), map_vars[i])
input_indices.append(where_idx)
else:
input_indices.append(None)
# Infer result type
result_type, casting = _result_type(
[sdfg.arrays[arg] if isinstance(arg, str) and arg in sdfg.arrays else arg for arg in inputs],
ufunc_impl['operator'])
if 'dtype' in kwargs.keys():
dtype = kwargs['dtype']
if dtype in dtypes.DTYPE_TO_TYPECLASS.keys():
result_type = dtype
# Create output data (if needed)
outputs = _create_output(sdfg, inputs, outputs, out_shape, result_type)
# Set tasklet parameters
tasklet_params = _set_tasklet_params(ufunc_impl, inputs, casting=casting)
# Create subgraph
_create_subgraph(visitor,
sdfg,
state,
inputs,
outputs,
map_range,
input_indices,
output_idx,
out_shape,
tasklet_params,
has_where=has_where,
where=where)
return outputs
@oprepo.replaces('numpy.reshape')
def reshape(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
arr: str,
newshape: Union[str, symbolic.SymbolicType, Tuple[Union[str, symbolic.SymbolicType]]],
order='C') -> str:
if isinstance(arr, (list, tuple)) and len(arr) == 1:
arr = arr[0]
desc = sdfg.arrays[arr]
# "order" determines stride orders
fortran_strides = False
if order == 'F' or (order == 'A' and desc.strides[0] == 1):
# FORTRAN strides
fortran_strides = True
# New shape and strides as symbolic expressions
newshape = [symbolic.pystr_to_symbolic(s) for s in newshape]
if fortran_strides:
strides = [data._prod(newshape[:i]) for i in range(len(newshape))]
else:
strides = [data._prod(newshape[i + 1:]) for i in range(len(newshape))]
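    # Worked example (illustrative): for newshape [2, 3, 4] this yields C-order
    # strides [12, 4, 1] and FORTRAN-order strides [1, 2, 6] (in elements).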
newarr, newdesc = sdfg.add_view(arr,
newshape,
desc.dtype,
storage=desc.storage,
strides=strides,
allow_conflicts=desc.allow_conflicts,
total_size=desc.total_size,
may_alias=desc.may_alias,
alignment=desc.alignment,
find_new_name=True)
# Register view with DaCe program visitor
aset = subsets.Range.from_array(desc)
vset = subsets.Range.from_array(newdesc)
pv.views[newarr] = (arr, Memlet(data=arr, subset=aset, other_subset=vset))
return newarr
@oprepo.replaces_method('Array', 'view')
@oprepo.replaces_method('Scalar', 'view')
@oprepo.replaces_method('View', 'view')
def view(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, dtype, type=None) -> str:
if type is not None:
raise ValueError('View to numpy types is not supported')
desc = sdfg.arrays[arr]
# Change size of array based on the differences in bytes
bytemult = desc.dtype.bytes / dtype.bytes
bytediv = dtype.bytes / desc.dtype.bytes
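    # Worked example (illustrative): viewing a float64 array as int32 gives
    # bytemult = 8 / 4 = 2, so the contiguous dimension doubles; viewing int32
    # as float64 gives bytemult = 0.5 and bytediv = 2, so the contiguous
    # dimension must be divisible by 2, mirroring NumPy's ndarray.view checks.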
contigdim = next(i for i, s in enumerate(desc.strides) if s == 1)
# For cases that can be recognized, if contiguous dimension is too small
# raise an exception similar to numpy
if (not issymbolic(desc.shape[contigdim], sdfg.constants) and bytemult < 1
and desc.shape[contigdim] % bytediv != 0):
raise ValueError('When changing to a larger dtype, its size must be a divisor of '
'the total size in bytes of the last axis of the array.')
# Create new shape and strides for view
newshape = list(desc.shape)
newstrides = [s * bytemult if i != contigdim else s for i, s in enumerate(desc.strides)]
newshape[contigdim] *= bytemult
newarr, _ = sdfg.add_view(arr,
newshape,
dtype,
storage=desc.storage,
strides=newstrides,
allow_conflicts=desc.allow_conflicts,
total_size=desc.total_size * bytemult,
may_alias=desc.may_alias,
alignment=desc.alignment,
find_new_name=True)
# Register view with DaCe program visitor
    # NOTE: We do not create a Memlet of the form `A[subset] -> osubset` here
    # because the View can be of a different dtype. Adding `other_subset` in
    # such cases would trigger a validation error.
pv.views[newarr] = (arr, Memlet.from_array(arr, desc))
return newarr
@oprepo.replaces_attribute('Array', 'size')
@oprepo.replaces_attribute('Scalar', 'size')
@oprepo.replaces_attribute('View', 'size')
def size(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> Size:
desc = sdfg.arrays[arr]
totalsize = data._prod(desc.shape)
return totalsize
@oprepo.replaces_attribute('Array', 'flat')
@oprepo.replaces_attribute('Scalar', 'flat')
@oprepo.replaces_attribute('View', 'flat')
def flat(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, order: str = 'C') -> str:
desc = sdfg.arrays[arr]
totalsize = data._prod(desc.shape)
if order not in ('C', 'F'):
raise NotImplementedError(f'Order "{order}" not yet supported for flattening')
if order == 'C':
contig_strides = tuple(data._prod(desc.shape[i + 1:]) for i in range(len(desc.shape)))
elif order == 'F':
contig_strides = tuple(data._prod(desc.shape[:i]) for i in range(len(desc.shape)))
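    # Worked example (illustrative): for shape (2, 3) flattened in 'C' order,
    # contig_strides is (3, 1); if the data does not already have these strides,
    # the copy map below writes element (__i0, __i1) to flat position
    # __i0*3 + __i1.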
if desc.total_size != totalsize or desc.strides != contig_strides:
# If data is not contiguous (numpy standard), create copy as explicit map
# warnings.warn(f'Generating explicit copy for non-contiguous array "{arr}"')
newarr, _ = sdfg.add_array(arr, [totalsize],
desc.dtype,
storage=desc.storage,
strides=[1],
allow_conflicts=desc.allow_conflicts,
total_size=totalsize,
may_alias=desc.may_alias,
alignment=desc.alignment,
transient=True,
find_new_name=True)
maprange = {f'__i{i}': (0, s - 1, 1) for i, s in enumerate(desc.shape)}
out_index = sum(symbolic.pystr_to_symbolic(f'__i{i}') * s for i, s in enumerate(contig_strides))
state.add_mapped_tasklet(
'flat',
maprange,
dict(__inp=Memlet(data=arr, subset=','.join(maprange.keys()))),
'__out = __inp',
dict(__out=Memlet(data=newarr, subset=subsets.Range([(out_index, out_index, 1)]))),
external_edges=True,
)
else:
newarr, newdesc = sdfg.add_view(arr, [totalsize],
desc.dtype,
storage=desc.storage,
strides=[1],
allow_conflicts=desc.allow_conflicts,
total_size=totalsize,
may_alias=desc.may_alias,
alignment=desc.alignment,
find_new_name=True)
# Register view with DaCe program visitor
aset = subsets.Range.from_array(desc)
vset = subsets.Range.from_array(newdesc)
pv.views[newarr] = (arr, Memlet(data=arr, subset=aset, other_subset=vset))
return newarr
@oprepo.replaces_attribute('Array', 'T')
@oprepo.replaces_attribute('View', 'T')
def _ndarray_T(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> str:
return _transpose(pv, sdfg, state, arr)
@oprepo.replaces_attribute('Array', 'real')
@oprepo.replaces_attribute('Scalar', 'real')
@oprepo.replaces_attribute('View', 'real')
def _ndarray_real(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> str:
return _real(pv, sdfg, state, arr)
@oprepo.replaces_attribute('Array', 'imag')
@oprepo.replaces_attribute('Scalar', 'imag')
@oprepo.replaces_attribute('View', 'imag')
def _ndarray_imag(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> str:
return _imag(pv, sdfg, state, arr)
@oprepo.replaces_method('Array', 'copy')
@oprepo.replaces_method('Scalar', 'copy')
@oprepo.replaces_method('View', 'copy')
def _ndarray_copy(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> str:
return _numpy_copy(pv, sdfg, state, arr)
@oprepo.replaces_method('Array', 'fill')
@oprepo.replaces_method('Scalar', 'fill')
@oprepo.replaces_method('View', 'fill')
def _ndarray_fill(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, value: Number) -> str:
if not isinstance(value, (Number, np.bool_)):
raise mem_parser.DaceSyntaxError(pv, None, "Fill value {f} must be a number!".format(f=value))
return _elementwise(pv, sdfg, state, "lambda x: {}".format(value), arr, arr)
@oprepo.replaces_method('Array', 'reshape')
@oprepo.replaces_method('View', 'reshape')
def _ndarray_reshape(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
arr: str,
newshape: Union[str, symbolic.SymbolicType, Tuple[Union[str, symbolic.SymbolicType]]],
order='C') -> str:
return reshape(pv, sdfg, state, arr, newshape, order)
@oprepo.replaces_method('Array', 'transpose')
@oprepo.replaces_method('View', 'transpose')
def _ndarray_transpose(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, *axes) -> str:
if len(axes) == 0:
axes = None
elif len(axes) == 1:
axes = axes[0]
return _transpose(pv, sdfg, state, arr, axes)
@oprepo.replaces_method('Array', 'flatten')
@oprepo.replaces_method('Scalar', 'flatten')
@oprepo.replaces_method('View', 'flatten')
def _ndarray_flatten(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, order: str = 'C') -> str:
new_arr = flat(pv, sdfg, state, arr, order)
# `flatten` always returns a copy
if isinstance(new_arr, data.View):
return _ndarray_copy(pv, sdfg, state, new_arr)
return new_arr
@oprepo.replaces_method('Array', 'ravel')
@oprepo.replaces_method('Scalar', 'ravel')
@oprepo.replaces_method('View', 'ravel')
def _ndarray_ravel(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, order: str = 'C') -> str:
    # `ravel` returns a copy only when necessary (similar to ndarray.flat)
return flat(pv, sdfg, state, arr, order)
@oprepo.replaces_method('Array', 'max')
@oprepo.replaces_method('Scalar', 'max')
@oprepo.replaces_method('View', 'max')
def _ndarray_max(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'maximum', [arr], kwargs)[0]
@oprepo.replaces_method('Array', 'min')
@oprepo.replaces_method('Scalar', 'min')
@oprepo.replaces_method('View', 'min')
def _ndarray_min(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'minimum', [arr], kwargs)[0]
# TODO: It looks like `_argminmax` does not work with a flattened array.
# @oprepo.replaces_method('Array', 'argmax')
# @oprepo.replaces_method('Scalar', 'argmax')
# @oprepo.replaces_method('View', 'argmax')
# def _ndarray_argmax(pv: 'ProgramVisitor',
# sdfg: SDFG,
# state: SDFGState,
# arr: str,
# axis: int = None,
# out: str = None) -> str:
# if not axis:
# axis = 0
# arr = flat(pv, sdfg, state, arr)
# nest, newarr = _argmax(pv, sdfg, state, arr, axis)
# if out:
# r = state.add_read(arr)
# w = state.add_read(newarr)
# state.add_nedge(r, w, dace.Memlet.from_array(newarr, sdfg.arrays[newarr]))
# return new_arr
@oprepo.replaces_method('Array', 'conj')
@oprepo.replaces_method('Scalar', 'conj')
@oprepo.replaces_method('View', 'conj')
def _ndarray_conj(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str) -> str:
return implement_ufunc(pv, None, sdfg, state, 'conj', [arr], {})[0]
@oprepo.replaces_method('Array', 'sum')
@oprepo.replaces_method('Scalar', 'sum')
@oprepo.replaces_method('View', 'sum')
def _ndarray_sum(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'add', [arr], kwargs)[0]
@oprepo.replaces_method('Array', 'mean')
@oprepo.replaces_method('Scalar', 'mean')
@oprepo.replaces_method('View', 'mean')
def _ndarray_mean(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
nest = NestedCall(pv, sdfg, state)
kwargs = kwargs or dict(axis=None)
sumarr = implement_ufunc_reduce(pv, None, sdfg, nest.add_state(), 'add', [arr], kwargs)[0]
desc = sdfg.arrays[arr]
sz = reduce(lambda x, y: x * y, desc.shape)
return nest, _elementwise(pv, sdfg, nest.add_state(), "lambda x: x / {}".format(sz), sumarr)
@oprepo.replaces_method('Array', 'prod')
@oprepo.replaces_method('Scalar', 'prod')
@oprepo.replaces_method('View', 'prod')
def _ndarray_prod(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'multiply', [arr], kwargs)[0]
@oprepo.replaces_method('Array', 'all')
@oprepo.replaces_method('Scalar', 'all')
@oprepo.replaces_method('View', 'all')
def _ndarray_all(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'logical_and', [arr], kwargs)[0]
@oprepo.replaces_method('Array', 'any')
@oprepo.replaces_method('Scalar', 'any')
@oprepo.replaces_method('View', 'any')
def _ndarray_any(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, kwargs: Dict[str, Any] = None) -> str:
kwargs = kwargs or dict(axis=None)
return implement_ufunc_reduce(pv, None, sdfg, state, 'logical_or', [arr], kwargs)[0]
# Datatype converter #########################################################
def _make_datatype_converter(typeclass: str):
if typeclass == "bool":
dtype = dace.bool
elif typeclass in {"int", "float", "complex"}:
dtype = dtypes.DTYPE_TO_TYPECLASS[eval(typeclass)]
else:
dtype = dtypes.DTYPE_TO_TYPECLASS[eval("np.{}".format(typeclass))]
@oprepo.replaces(typeclass)
@oprepo.replaces("dace.{}".format(typeclass))
@oprepo.replaces("numpy.{}".format(typeclass))
def _converter(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arg: UfuncInput):
return _datatype_converter(sdfg, state, arg, dtype=dtype)
for typeclass in dtypes.TYPECLASS_STRINGS:
_make_datatype_converter(typeclass)
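# For example (illustrative): the loop above registers one converter per type
# string, so that a call such as `dace.float32(x)` or `numpy.int64(x)` inside a
# @dace.program is replaced by an element-wise cast through `_datatype_converter`.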
def _datatype_converter(sdfg: SDFG, state: SDFGState, arg: UfuncInput, dtype: dtypes.typeclass) -> UfuncOutput:
""" Out-of-place datatype conversion of the input argument.
:param sdfg: SDFG object
:param state: SDFG State object
:param arg: Input argument
:param dtype: Datatype to convert input argument into
:returns: dace.data.Array of same size as input or dace.data.Scalar
"""
# Get shape and indices
(out_shape, map_indices, out_indices, inp_indices) = _validate_shapes(None, None, sdfg, None, [arg], [None])
# Create output data
outputs = _create_output(sdfg, [arg], [None], out_shape, dtype)
# Set tasklet parameters
impl = {
'name': "_convert_to_{}_".format(dtype.to_string()),
'inputs': ['__inp'],
'outputs': ['__out'],
'code': "__out = dace.{}(__inp)".format(dtype.to_string())
}
tasklet_params = _set_tasklet_params(impl, [arg])
# Visitor input only needed when `has_where == True`.
_create_subgraph(None,
sdfg,
state, [arg],
outputs,
map_indices,
inp_indices,
out_indices,
out_shape,
tasklet_params,
has_where=False,
where=None)
return outputs
@oprepo.replaces_method('Array', 'astype')
@oprepo.replaces_method('Scalar', 'astype')
@oprepo.replaces_method('View', 'astype')
def _ndarray_astype(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, arr: str, dtype: dace.typeclass) -> str:
if isinstance(dtype, type) and dtype in dtypes._CONSTANT_TYPES[:-1]:
dtype = dtypes.typeclass(dtype)
return _datatype_converter(sdfg, state, arr, dtype)[0]
# Replacements that need ufuncs ###############################################
# TODO: Fix by separating to different modules and importing
@oprepo.replaces('dace.dot')
@oprepo.replaces('numpy.dot')
def dot(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op_a: str, op_b: str, op_out=None):
# TODO: Add support for dot(N-D, 1-D) and dot(N-D, M-D) cases.
# See https://numpy.org/doc/stable/reference/generated/numpy.dot.html
# TODO: Add/improve validation
for op in (op_a, op_b):
if not isinstance(op, str) or not op in sdfg.arrays.keys():
raise SyntaxError()
arr_a = sdfg.arrays[op_a]
arr_b = sdfg.arrays[op_b]
if len(arr_a.shape) == 2 and len(arr_b.shape) == 2:
# Matrix multiplication
        # TODO: If `op_out` is given, then this is not correct. We need np.matmul,
# but it is not implemented yet
return _matmult(pv, sdfg, state, op_a, op_b)
if (isinstance(arr_a, data.Scalar) or list(arr_a.shape) == [1] or isinstance(arr_b, data.Scalar)
or list(arr_b.shape) == [1]):
        # Case dot(N-D, 0-D), interpreted as np.multiply(a, b)
node = ast.Call()
ufunc_name = 'multiply'
args = [op_a, op_b]
if op_out:
args.append(op_out)
return ufunc_impl(pv, node, ufunc_name, sdfg, state, args)
if len(arr_a.shape) > 2 or len(arr_b.shape) > 2:
raise NotImplementedError
if arr_a.shape[0] != arr_b.shape[0]:
raise SyntaxError()
if op_out:
if not isinstance(op_out, str) or not op_out in sdfg.arrays.keys():
raise SyntaxError()
else:
# Infer result type
restype, _ = _result_type([arr_a, arr_b], 'Mul')
op_out = sdfg.temp_data_name()
sdfg.add_scalar(op_out, restype, transient=True, storage=arr_a.storage)
arr_out = sdfg.arrays[op_out]
from dace.libraries.blas.nodes.dot import Dot # Avoid import loop
acc_a = state.add_read(op_a)
acc_b = state.add_read(op_b)
acc_out = state.add_write(op_out)
tasklet = Dot('_Dot_')
state.add_node(tasklet)
state.add_edge(acc_a, None, tasklet, '_x', dace.Memlet.from_array(op_a, arr_a))
state.add_edge(acc_b, None, tasklet, '_y', dace.Memlet.from_array(op_b, arr_b))
state.add_edge(tasklet, '_result', acc_out, None, dace.Memlet.from_array(op_out, arr_out))
return op_out
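# Illustrative usage sketch (not part of the original module): the `dot`
# replacement above is normally exercised from a `dace.program`. The symbol
# `N` and the function name `dot_example` below are made up for this example.
#
# import dace
# import numpy as np
#
# N = dace.symbol('N')
#
# @dace.program
# def dot_example(x: dace.float64[N], y: dace.float64[N]):
#     return np.dot(x, y)  # parsed through the replacement registered above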
# NumPy linalg replacements ###################################################
@oprepo.replaces('dace.linalg.inv')
@oprepo.replaces('numpy.linalg.inv')
def _inv(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, inp_op: str):
if not isinstance(inp_op, str) or not inp_op in sdfg.arrays.keys():
raise SyntaxError()
inp_arr = sdfg.arrays[inp_op]
out_arr = sdfg.add_temp_transient(inp_arr.shape, inp_arr.dtype, storage=inp_arr.storage)
from dace.libraries.linalg import Inv
inp = state.add_read(inp_op)
out = state.add_write(out_arr[0])
inv_node = Inv("inv", overwrite_a=False, use_getri=True)
state.add_memlet_path(inp, inv_node, dst_conn="_ain", memlet=Memlet.from_array(inp_op, inp_arr))
state.add_memlet_path(inv_node, out, src_conn="_aout", memlet=Memlet.from_array(*out_arr))
return out_arr[0]
@oprepo.replaces('dace.linalg.solve')
@oprepo.replaces('numpy.linalg.solve')
def _solve(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, op_a: str, op_b: str):
for op in (op_a, op_b):
if not isinstance(op, str) or not op in sdfg.arrays.keys():
raise SyntaxError()
a_arr = sdfg.arrays[op_a]
b_arr = sdfg.arrays[op_b]
out_arr = sdfg.add_temp_transient(b_arr.shape, b_arr.dtype, storage=b_arr.storage)
from dace.libraries.linalg import Solve
a_inp = state.add_read(op_a)
b_inp = state.add_read(op_b)
out = state.add_write(out_arr[0])
solve_node = Solve("solve")
state.add_memlet_path(a_inp, solve_node, dst_conn="_ain", memlet=Memlet.from_array(op_a, a_arr))
state.add_memlet_path(b_inp, solve_node, dst_conn="_bin", memlet=Memlet.from_array(op_b, b_arr))
state.add_memlet_path(solve_node, out, src_conn="_bout", memlet=Memlet.from_array(*out_arr))
return out_arr[0]
@oprepo.replaces('dace.linalg.cholesky')
@oprepo.replaces('numpy.linalg.cholesky')
def _cholesky(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, inp_op: str):
if not isinstance(inp_op, str) or not inp_op in sdfg.arrays.keys():
raise SyntaxError()
inp_arr = sdfg.arrays[inp_op]
out_arr = sdfg.add_temp_transient(inp_arr.shape, inp_arr.dtype, storage=inp_arr.storage)
from dace.libraries.linalg import Cholesky
inp = state.add_read(inp_op)
out = state.add_write(out_arr[0])
chlsky_node = Cholesky("cholesky", lower=True)
state.add_memlet_path(inp, chlsky_node, dst_conn="_a", memlet=Memlet.from_array(inp_op, inp_arr))
state.add_memlet_path(chlsky_node, out, src_conn="_b", memlet=Memlet.from_array(*out_arr))
return out_arr[0]
# CuPy replacements
@oprepo.replaces("cupy._core.core.ndarray")
@oprepo.replaces("cupy.ndarray")
def _define_cupy_local(
pv: "ProgramVisitor",
sdfg: SDFG,
state: SDFGState,
shape: Shape,
dtype: typeclass,
):
"""Defines a local array in a DaCe program."""
if not isinstance(shape, (list, tuple)):
shape = [shape]
name, _ = sdfg.add_temp_transient(shape, dtype, storage=dtypes.StorageType.GPU_Global)
return name
@oprepo.replaces('cupy.full')
def _cupy_full(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
shape: Shape,
fill_value: Union[sp.Expr, Number],
               dtype: dace.typeclass = None):
    """ Creates an array of the specified shape and initializes it with
the fill value.
"""
if isinstance(fill_value, (Number, np.bool_)):
vtype = dtypes.DTYPE_TO_TYPECLASS[type(fill_value)]
elif isinstance(fill_value, sp.Expr):
vtype = _sym_type(fill_value)
else:
raise mem_parser.DaceSyntaxError(pv, None, "Fill value {f} must be a number!".format(f=fill_value))
dtype = dtype or vtype
name, _ = sdfg.add_temp_transient(shape, dtype, storage=dtypes.StorageType.GPU_Global)
state.add_mapped_tasklet(
'_cupy_full_', {"__i{}".format(i): "0: {}".format(s)
for i, s in enumerate(shape)}, {},
"__out = {}".format(fill_value),
dict(__out=dace.Memlet.simple(name, ",".join(["__i{}".format(i) for i in range(len(shape))]))),
external_edges=True)
return name
@oprepo.replaces('cupy.zeros')
def _cupy_zeros(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, shape: Shape, dtype: dace.typeclass = dace.float64):
    """ Creates an array of the specified shape and initializes it with zeros.
"""
return _cupy_full(pv, sdfg, state, shape, 0.0, dtype)
@oprepo.replaces('cupy.empty_like')
def _cupy_empty_like(pv: 'ProgramVisitor',
sdfg: SDFG,
state: SDFGState,
prototype: str,
dtype: dace.typeclass = None,
shape: Shape = None):
if prototype not in sdfg.arrays.keys():
raise mem_parser.DaceSyntaxError(pv, None, "Prototype argument {a} is not SDFG data!".format(a=prototype))
desc = sdfg.arrays[prototype]
name, newdesc = sdfg.add_temp_transient_like(desc)
if dtype is not None:
newdesc.dtype = dtype
if shape is not None:
newdesc.shape = shape
return name
@oprepo.replaces('cupy.empty')
@oprepo.replaces('cupy_empty')
def _cupy_empty(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, shape: Shape, dtype: dace.typeclass):
    """ Creates an uninitialized array of the specified shape and dtype. """
return _define_cupy_local(pv, sdfg, state, shape, dtype)
| 42.117175
| 120
| 0.586135
|
1517e13d1fe37a4db2ecbb3e3dba4ed2f96d9cf8
| 1,834
|
py
|
Python
|
deep_sort/detection.py
|
Raghav-B/CS3244_Project
|
735aee8817f8e2649f8e214f0abc51bea07b8cc1
|
[
"MIT"
] | null | null | null |
deep_sort/detection.py
|
Raghav-B/CS3244_Project
|
735aee8817f8e2649f8e214f0abc51bea07b8cc1
|
[
"MIT"
] | null | null | null |
deep_sort/detection.py
|
Raghav-B/CS3244_Project
|
735aee8817f8e2649f8e214f0abc51bea07b8cc1
|
[
"MIT"
] | null | null | null |
# vim: expandtab:ts=4:sw=4
import numpy as np
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Parameters
----------
tlwh : array_like
Bounding box in format `(x, y, w, h)`.
confidence : float
Detector confidence score.
feature : array_like
A feature vector that describes the object contained in this image.
Attributes
----------
tlwh : ndarray
Bounding box in format `(top left x, top left y, width, height)`.
confidence : ndarray
Detector confidence score.
class_name : ndarray
Detector class.
feature : ndarray | NoneType
A feature vector that describes the object contained in this image.
"""
def __init__(self, tlwh, confidence, class_name, feature):
self.tlwh = np.asarray(tlwh, dtype=np.float)
self.confidence = float(confidence)
self.class_name = class_name
self.feature = np.asarray(feature, dtype=np.float32)
def get_class(self):
return self.class_name
def to_tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
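    # Worked example (illustrative only): for tlwh = [10, 20, 50, 100]
    # (top-left x, top-left y, width, height), the conversions above give
    #   to_tlbr() -> [10, 20, 60, 120]   (bottom-right corner = top-left + size)
    #   to_xyah() -> [35, 70, 0.5, 100]  (center x, center y, width/height, height)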
class GroupDetection(Detection):
def __init__(self, tlwh, num_people, feature):
super().__init__(tlwh, 1, "group", feature)
self.num_people = num_people
| 30.065574
| 80
| 0.577426
|
7bba99f22cbbcaf52d9367555e89b62e06fbfa46
| 351
|
py
|
Python
|
board/migrations/0012_remove_notification_ipaddress.py
|
devunt/hydrocarbon
|
3e13f03f8375533c422a6e609742b3d2f4c38c0f
|
[
"MIT"
] | 8
|
2015-04-14T00:36:08.000Z
|
2017-06-02T06:08:51.000Z
|
board/migrations/0012_remove_notification_ipaddress.py
|
devunt/hydrocarbon
|
3e13f03f8375533c422a6e609742b3d2f4c38c0f
|
[
"MIT"
] | 10
|
2020-03-24T15:33:11.000Z
|
2022-03-11T23:16:05.000Z
|
board/migrations/0012_remove_notification_ipaddress.py
|
devunt/hydrocarbon
|
3e13f03f8375533c422a6e609742b3d2f4c38c0f
|
[
"MIT"
] | 2
|
2016-08-04T23:58:29.000Z
|
2016-09-22T10:20:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('board', '0011_auto_20150111_1842'),
]
operations = [
migrations.RemoveField(
model_name='notification',
name='ipaddress',
),
]
| 18.473684
| 45
| 0.606838
|
7d322b28484ba16feeaec1ec64b39b056a1f834e
| 8,589
|
py
|
Python
|
avod/core/box_list.py
|
Ascend-Huawei/AVOD
|
ea62372517bbfa9d4020bc5ab2739ee182c63c56
|
[
"BSD-2-Clause"
] | null | null | null |
avod/core/box_list.py
|
Ascend-Huawei/AVOD
|
ea62372517bbfa9d4020bc5ab2739ee182c63c56
|
[
"BSD-2-Clause"
] | null | null | null |
avod/core/box_list.py
|
Ascend-Huawei/AVOD
|
ea62372517bbfa9d4020bc5ab2739ee182c63c56
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
from npu_bridge.npu_init import *
import tensorflow as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is
not in float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return self.data['boxes'].get_shape()[0].value
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
    This function returns the data associated with the specified field name
    (for example, the field 'boxes' holds the box coordinates).
    Args:
      field: a string specifying the field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
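# Illustrative usage sketch (not part of the original module): construct a
# BoxList from an [N, 4] float32 tensor and attach an extra per-box field.
# The tensor values below are made up for this example.
#
# boxes = tf.constant([[0., 0., 10., 10.], [5., 5., 20., 20.]], dtype=tf.float32)
# boxlist = BoxList(boxes)
# boxlist.add_field('scores', tf.constant([0.9, 0.75]))
# ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()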
| 35.937238
| 80
| 0.641751
|
827b2a7f65366c7ff770e8098fc885891fc58b00
| 8,598
|
py
|
Python
|
matrices.py
|
seanstappas/ecse-543-assignment2
|
d1d017f19c80c0102e9fd1f1135e13c25bcec640
|
[
"MIT"
] | null | null | null |
matrices.py
|
seanstappas/ecse-543-assignment2
|
d1d017f19c80c0102e9fd1f1135e13c25bcec640
|
[
"MIT"
] | null | null | null |
matrices.py
|
seanstappas/ecse-543-assignment2
|
d1d017f19c80c0102e9fd1f1135e13c25bcec640
|
[
"MIT"
] | null | null | null |
from __future__ import division
import copy
import csv
from ast import literal_eval
import math
class Matrix:
def __init__(self, data):
self.data = data
self.num_rows = len(data)
self.num_cols = len(data[0])
def __str__(self):
string = ''
for row in self.data:
string += '\n'
for val in row:
string += '{:6.2f} '.format(val)
return string
def integer_string(self):
string = ''
for row in self.data:
string += '\n'
for val in row:
string += '{:3.0f} '.format(val)
return string
def __add__(self, other):
if len(self) != len(other) or len(self[0]) != len(other[0]):
raise ValueError('Incompatible matrix sizes for addition. Matrix A is {}x{}, but matrix B is {}x{}.'
.format(len(self), len(self[0]), len(other), len(other[0])))
return Matrix([[self[row][col] + other[row][col] for col in range(self.num_cols)]
for row in range(self.num_rows)])
def __sub__(self, other):
if len(self) != len(other) or len(self[0]) != len(other[0]):
raise ValueError('Incompatible matrix sizes for subtraction. Matrix A is {}x{}, but matrix B is {}x{}.'
.format(len(self), len(self[0]), len(other), len(other[0])))
return Matrix([[self[row][col] - other[row][col] for col in range(self.num_cols)]
for row in range(self.num_rows)])
def __mul__(self, other):
if type(other) == float or type(other) == int:
return self.scalar_multiply(other)
if self.num_cols != other.num_rows:
raise ValueError('Incompatible matrix sizes for multiplication. Matrix A is {}x{}, but matrix B is {}x{}.'
.format(self.num_rows, self.num_cols, other.num_rows, other.num_cols))
# Inspired from https://en.wikipedia.org/wiki/Matrix_multiplication
product = Matrix.empty(self.num_rows, other.num_cols)
for i in range(self.num_rows):
for j in range(other.num_cols):
row_sum = 0
for k in range(self.num_cols):
row_sum += self[i][k] * other[k][j]
product[i][j] = row_sum
return product
def scalar_multiply(self, scalar):
return Matrix([[self[row][col] * scalar for col in range(self.num_cols)] for row in range(self.num_rows)])
def __div__(self, other):
"""
Element-wise division.
"""
if self.num_rows != other.num_rows or self.num_cols != other.num_cols:
raise ValueError('Incompatible matrix sizes.')
return Matrix([[self[row][col] / other[row][col] for col in range(self.num_cols)]
for row in range(self.num_rows)])
def __neg__(self):
return Matrix([[-self[row][col] for col in range(self.num_cols)] for row in range(self.num_rows)])
def __deepcopy__(self, memo):
return Matrix(copy.deepcopy(self.data))
def __getitem__(self, item):
return self.data[item]
def __len__(self):
return len(self.data)
def item(self):
"""
:return: the single element contained by this matrix, if it is 1x1.
"""
if not (self.num_rows == 1 and self.num_cols == 1):
raise ValueError('Matrix is not 1x1')
return self.data[0][0]
def is_positive_definite(self):
"""
        :return: True if the matrix is positive-definite, False otherwise.
"""
A = copy.deepcopy(self.data)
for j in range(self.num_rows):
if A[j][j] <= 0:
return False
A[j][j] = math.sqrt(A[j][j])
for i in range(j + 1, self.num_rows):
A[i][j] = A[i][j] / A[j][j]
for k in range(j + 1, i + 1):
A[i][k] = A[i][k] - A[i][j] * A[k][j]
return True
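    # Worked example (illustrative only): for Matrix([[4, 2], [2, 3]]) the loop
    # above computes A[0][0] = 2, A[1][0] = 2 / 2 = 1, A[1][1] = 3 - 1 * 1 = 2 > 0,
    # so the matrix is reported as positive-definite. A matrix with a non-positive
    # pivot, e.g. Matrix([[0, 1], [1, 0]]), returns False immediately.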
def transpose(self):
"""
:return: the transpose of the current matrix
"""
return Matrix([[self.data[row][col] for row in range(self.num_rows)] for col in range(self.num_cols)])
def mirror_horizontal(self):
"""
:return: the horizontal mirror of the current matrix
"""
return Matrix([[self.data[self.num_rows - row - 1][col] for col in range(self.num_cols)]
for row in range(self.num_rows)])
def empty_copy(self):
"""
:return: an empty matrix of the same size as the current matrix.
"""
return Matrix.empty(self.num_rows, self.num_cols)
def infinity_norm(self):
if self.num_cols > 1:
raise ValueError('Not a column vector.')
return max([abs(x) for x in self.transpose()[0]])
def two_norm(self):
if self.num_cols > 1:
raise ValueError('Not a column vector.')
return math.sqrt(sum([x ** 2 for x in self.transpose()[0]]))
def save_to_csv(self, filename):
"""
Saves the current matrix to a CSV file.
:param filename: the name of the CSV file
"""
with open(filename, "wb") as f:
writer = csv.writer(f)
for row in self.data:
writer.writerow(row)
def save_to_latex(self, filename):
"""
Saves the current matrix to a latex-readable matrix.
:param filename: the name of the CSV file
"""
with open(filename, "wb") as f:
for row in range(self.num_rows):
for col in range(self.num_cols):
f.write('{}'.format(self.data[row][col]))
if col < self.num_cols - 1:
f.write('& ')
if row < self.num_rows - 1:
f.write('\\\\\n')
@staticmethod
def multiply(*matrices):
"""
Computes the product of the given matrices.
:param matrices: the matrix objects
:return: the product of the given matrices
"""
        n = matrices[0].num_rows
product = Matrix.identity(n)
for matrix in matrices:
product = product * matrix
return product
@staticmethod
def empty(num_rows, num_cols):
"""
Returns an empty matrix (filled with zeroes) with the specified number of columns and rows.
:param num_rows: number of rows
:param num_cols: number of columns
:return: the empty matrix
"""
return Matrix([[0 for _ in range(num_cols)] for _ in range(num_rows)])
@staticmethod
def identity(n):
"""
Returns the identity matrix of the given size.
:param n: the size of the identity matrix (number of rows or columns)
:return: the identity matrix of size n
"""
return Matrix.diagonal_single_value(1, n)
@staticmethod
def diagonal(values):
"""
Returns a diagonal matrix with the given values along the main diagonal.
:param values: the values along the main diagonal
:return: a diagonal matrix with the given values along the main diagonal
"""
n = len(values)
return Matrix([[values[row] if row == col else 0 for col in range(n)] for row in range(n)])
@staticmethod
def diagonal_single_value(value, n):
"""
Returns a diagonal matrix of the given size with the given value along the diagonal.
:param value: the value of each element on the main diagonal
:param n: the size of the matrix
:return: a diagonal matrix of the given size with the given value along the diagonal.
"""
return Matrix([[value if row == col else 0 for col in range(n)] for row in range(n)])
@staticmethod
def column_vector(values):
"""
Transforms a row vector into a column vector.
:param values: the values, one for each row of the column vector
:return: the column vector
"""
return Matrix([[value] for value in values])
@staticmethod
def csv_to_matrix(filename):
"""
Reads a CSV file to a matrix.
:param filename: the name of the CSV file
:return: a matrix containing the values in the CSV file
"""
with open(filename, 'r') as csv_file:
reader = csv.reader(csv_file)
data = []
for row_number, row in enumerate(reader):
data.append([literal_eval(val) for val in row])
return Matrix(data)
| 34.53012
| 118
| 0.564085
|
9e4b91a632d3a1aa1f09c51852e3dccfa0bde4f9
| 5,419
|
py
|
Python
|
docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/operations/_operation_group_one_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/operations/_operation_group_one_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/operations/_operation_group_one_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 1
|
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_test_two_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "3.0.0"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/multiapi/one/testTwoEndpoint')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class OperationGroupOneOperations(object):
"""OperationGroupOneOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.multiapi.sample.v3.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def test_two(
self,
parameter_one=None, # type: Optional["_models.ModelThree"]
**kwargs # type: Any
):
        # type: (...) -> "_models.ModelThree"
        """TestTwo should be in OperationGroupOneOperations. Takes in ModelThree and outputs ModelThree.
:param parameter_one: A ModelThree parameter.
:type parameter_one: ~azure.multiapi.sample.v3.models.ModelThree
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelThree, or the result of cls(response)
:rtype: ~azure.multiapi.sample.v3.models.ModelThree
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelThree"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameter_one is not None:
_json = self._serialize.body(parameter_one, 'ModelThree')
else:
_json = None
request = build_test_two_request(
content_type=content_type,
json=_json,
template_url=self.test_two.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ModelThree', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
test_two.metadata = {'url': '/multiapi/one/testTwoEndpoint'} # type: ignore
| 38.985612
| 133
| 0.682045
|
c7228c0171705b0039409d71cba6f805d3a588f9
| 425
|
py
|
Python
|
tests/python/test_stencils.py
|
rezahojabr/taichi
|
122c0352ec480b740a4118819458cbf08d2e5ddb
|
[
"MIT"
] | 3
|
2020-01-08T02:58:51.000Z
|
2020-10-28T07:01:58.000Z
|
tests/python/test_stencils.py
|
rezahojabr/taichi
|
122c0352ec480b740a4118819458cbf08d2e5ddb
|
[
"MIT"
] | null | null | null |
tests/python/test_stencils.py
|
rezahojabr/taichi
|
122c0352ec480b740a4118819458cbf08d2e5ddb
|
[
"MIT"
] | 1
|
2020-03-25T16:37:00.000Z
|
2020-03-25T16:37:00.000Z
|
import taichi as ti
@ti.all_archs
def test_simple():
# Note: access simplification does not work in this case. Maybe worth fixing.
x = ti.var(ti.i32)
y = ti.var(ti.i32)
n = 128
@ti.layout
def place():
ti.root.dense(ti.i, n).place(x, y)
@ti.kernel
def run():
for i in range(n - 1):
x[i] = 1
y[i + 1] = 2
run()
for i in range(n - 1):
assert x[i] == 1
assert y[i + 1] == 2
| 15.740741
| 79
| 0.552941
|
d45ebf0d49ee5be84f5d8a96b10de137eb0f968c
| 2,684
|
py
|
Python
|
trove/cmd/guest.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 1
|
2019-09-20T08:31:54.000Z
|
2019-09-20T08:31:54.000Z
|
trove/cmd/guest.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
trove/cmd/guest.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg as openstack_cfg
from oslo_log import log as logging
from oslo_service import service as openstack_service
from trove.common import cfg
from trove.common import debug_utils
from trove.common.i18n import _
from trove.guestagent import api as guest_api
CONF = cfg.CONF
# The guest_id opt definition must match the one in common/cfg.py
CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None,
help="ID of the Guest Instance."),
openstack_cfg.StrOpt('instance_rpc_encr_key',
help=('Key (OpenSSL aes_cbc) for '
'instance RPC encryption.'))])
def main():
cfg.parse_args(sys.argv)
logging.setup(CONF, None)
debug_utils.setup()
from trove.guestagent import dbaas
manager = dbaas.datastore_registry().get(CONF.datastore_manager)
if not manager:
msg = (_("Manager class not registered for datastore manager %s") %
CONF.datastore_manager)
raise RuntimeError(msg)
if not CONF.guest_id:
msg = (_("The guest_id parameter is not set. guest_info.conf "
"was not injected into the guest or not read by guestagent"))
raise RuntimeError(msg)
# BUG(1650518): Cleanup in the Pike release
# make it fatal if CONF.instance_rpc_encr_key is None
# rpc module must be loaded after decision about thread monkeypatching
# because if thread module is not monkeypatched we can't use eventlet
# executor from oslo_messaging library.
from trove import rpc
rpc.init(CONF)
from trove.common.rpc import service as rpc_service
server = rpc_service.RpcService(
key=CONF.instance_rpc_encr_key,
topic="guestagent.%s" % CONF.guest_id,
manager=manager, host=CONF.guest_id,
rpc_api_version=guest_api.API.API_LATEST_VERSION)
launcher = openstack_service.launch(CONF, server, restart_method='mutate')
launcher.wait()
| 37.802817
| 78
| 0.688152
|
fe92b77d764d3ff9ea4b18a96073536c9452e41b
| 1,040
|
py
|
Python
|
papermache/common.py
|
Boukalikrates/papermache
|
8099da45ee094467d0f052aa9f970d89e97ea502
|
[
"MIT"
] | null | null | null |
papermache/common.py
|
Boukalikrates/papermache
|
8099da45ee094467d0f052aa9f970d89e97ea502
|
[
"MIT"
] | null | null | null |
papermache/common.py
|
Boukalikrates/papermache
|
8099da45ee094467d0f052aa9f970d89e97ea502
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# coding=utf-8
from hashlib import md5
import json
import urllib
def loadconfig(path,local=False):
try:
f=open(path+('/.papermache-config.json' if local else '/papermache/config.json'),'r')
config=json.loads(f.read())
#config=json.loads('{}')
f.close()
return config
except:
return {}
ext=lambda name: name[name.rfind('.')+1:]
def web(link):
return urllib.parse.quote(link, safe='')
#return link.replace('"','%22').replace("'",'%27').replace('?','%3F').replace('#','%23').replace(';','%3B').replace(' ','%20')
def md5hd(i):
return md5(i.encode('utf-8')).hexdigest()
lower=lambda i:i.lower()
def randomi(i):
return ''
def convert_bytes(num):
"""
    Convert a byte count into a human-readable string (bytes, KB, MB, GB, TB).
"""
if num < 1024.0:
return "%3.0f bytes" % (num)
num /= 1024.0
for x in ['KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
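# Usage sketch (illustrative only):
#   convert_bytes(500)            -> '500 bytes'
#   convert_bytes(2048)           -> '2.0 KB'
#   convert_bytes(5 * 1024 ** 3)  -> '5.0 GB'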
| 23.636364
| 130
| 0.55
|
ded4125edda48933fd12bd1beaffc2b5d3b1f9f3
| 2,295
|
py
|
Python
|
setup.py
|
Dennis-van-Gils/python-dvg-debug-functions
|
377d25ccea817b06c502443639c2ba50ca3341f1
|
[
"MIT"
] | null | null | null |
setup.py
|
Dennis-van-Gils/python-dvg-debug-functions
|
377d25ccea817b06c502443639c2ba50ca3341f1
|
[
"MIT"
] | null | null | null |
setup.py
|
Dennis-van-Gils/python-dvg-debug-functions
|
377d25ccea817b06c502443639c2ba50ca3341f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
) as fh:
return fh.read()
setup(
name="dvg-debug-functions",
license="MIT",
version="2.1.3", # PICK UP FROM 2.1.3, because 2.1.2 got yanked
description="Functions to neatly print debug information to the terminal output, well-suited for multithreaded programs.",
long_description="%s\n%s"
% (
re.compile("^.. start-badges.*^.. end-badges", re.M | re.S).sub(
"", read("README.rst")
),
re.sub(":[a-z]+:`~?(.*?)`", r"``\1``", read("CHANGELOG.rst")),
),
long_description_content_type="text/x-rst",
author="Dennis van Gils",
author_email="vangils.dennis@gmail.com",
url="https://github.com/Dennis-van-Gils/python-dvg-debug-functions",
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Utilities",
],
project_urls={
"Issue Tracker": "https://github.com/Dennis-van-Gils/python-dvg-debug-functions/issues",
},
keywords=[
"multithread",
"traceback",
"debugging",
"utility",
"fancy",
],
python_requires=">=3.6",
install_requires=[],
extras_require={},
)
| 31.438356
| 126
| 0.624401
|
e2ca3775d496dd7a313861fcbc4ec499da9a9715
| 2,925
|
py
|
Python
|
pyfeatures/app/tiles.py
|
ome/pydoop-features
|
189ee7a8cd28e92be9b7e5f51b61f39449564d2a
|
[
"Apache-2.0"
] | 2
|
2017-03-21T14:05:19.000Z
|
2017-05-20T17:33:55.000Z
|
pyfeatures/app/tiles.py
|
ome/pydoop-features
|
189ee7a8cd28e92be9b7e5f51b61f39449564d2a
|
[
"Apache-2.0"
] | 15
|
2017-04-12T11:09:58.000Z
|
2017-12-20T07:51:08.000Z
|
pyfeatures/app/tiles.py
|
IDR/pydoop-features
|
fcb21c69287910fbb5707b4f246ea526dc2b75a7
|
[
"Apache-2.0"
] | 4
|
2017-03-13T16:00:58.000Z
|
2017-11-30T15:33:29.000Z
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Generate tiles according to the given parameters and output a
visual representation of the resulting coverage.
"""
import numpy as np
from pyfeatures.feature_calc import gen_tiles
IMG_ALPHA = 0.2
TILE_ALPHA = 0.3
MAX_SMALL_SIZE = 32
def add_parser(subparsers):
parser = subparsers.add_parser("tiles", description=__doc__)
parser.add_argument("iW", type=int, metavar="WIDTH", help="image width")
parser.add_argument("iH", type=int, metavar="HEIGHT", help="image height")
parser.add_argument("-W", type=int, metavar="INT", help="tile width")
parser.add_argument("-H", type=int, metavar="INT", help="tile height")
parser.add_argument("-x", type=int, metavar="INT", help="tile x-distance")
parser.add_argument("-y", type=int, metavar="INT", help="tile y-distance")
parser.add_argument("--offset-x", type=int, metavar="INT",
help="tile initial x-offset")
parser.add_argument("--offset-y", type=int, metavar="INT",
help="tile initial y-offset")
parser.add_argument('-o', '--out-fn', metavar='FILE', default="tiles.png",
help="output file (extension = img format)")
parser.set_defaults(func=run)
return parser
def run(logger, args, extra_argv=None):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
img_array = np.zeros((args.iH, args.iW), dtype="i1")
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_patch(patches.Rectangle((0, 0), args.iW, args.iH, alpha=IMG_ALPHA))
mx = max(1, .05 * args.iW)
my = max(1, .05 * args.iH)
ax.axis([-mx, args.iW + mx, -my, args.iH + my])
for i, j, tile in gen_tiles(img_array, w=args.W, h=args.H,
dx=args.x, dy=args.y,
ox=args.offset_x, oy=args.offset_y):
h, w = tile.shape
ax.add_patch(patches.Rectangle((j, i), w, h, alpha=TILE_ALPHA))
logger.debug("%r", (j, i, w, h))
ax.invert_yaxis()
if max(args.iW, args.iH) <= MAX_SMALL_SIZE:
ax.set_xticks(xrange(args.iW + 1))
ax.set_yticks(xrange(args.iH + 1))
ax.grid()
logger.info("writing to %r" % (args.out_fn,))
fig.savefig(args.out_fn)
| 37.5
| 78
| 0.653675
|
bd5a0ab6dcea5fb668589ca36264cad34d0cd1cf
| 6,759
|
py
|
Python
|
grvx/viz/compare_freq.py
|
UMCU-RIBS/grvx
|
0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8
|
[
"MIT"
] | 1
|
2021-11-25T08:12:48.000Z
|
2021-11-25T08:12:48.000Z
|
grvx/viz/compare_freq.py
|
UMCU-RIBS/grvx
|
0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8
|
[
"MIT"
] | null | null | null |
grvx/viz/compare_freq.py
|
UMCU-RIBS/grvx
|
0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8
|
[
"MIT"
] | null | null | null |
from numpy import max, r_, mean
from scipy.stats import ttest_rel
from scipy.stats import linregress
from bidso.utils import read_tsv
import plotly.graph_objs as go
from .paths import get_path
axis_label = lambda freq: f'Frequency {freq[0]} - {freq[1]} Hz'
def plot_freq_comparison(parameters):
freqA = parameters['ieeg']['ecog_compare']['frequency_bands'][parameters['plot']['compare']['freqA']]
freqB = parameters['ieeg']['ecog_compare']['frequency_bands'][parameters['plot']['compare']['freqB']]
actA = read_tsv(get_path(parameters, 'summary_tsv', frequency_band=freqA))
actB = read_tsv(get_path(parameters, 'summary_tsv', frequency_band=freqB))
max_r = max(r_[actA['r2_at_peak'], actB['r2_at_peak']])
result = ttest_rel(actA['r2_at_peak'], actB['r2_at_peak'])
traces = [
go.Scatter(
x=actA['r2_at_peak'],
y=actB['r2_at_peak'],
text=actA['subject'],
mode='markers',
marker=dict(
color='black',
),
)
]
figs = []
fig = go.Figure(
data=traces,
layout=go.Layout(
height=500,
width=500,
title=dict(
                text=f'R<sup>2</sup> values (paired t-test, <i>p</i> = {result.pvalue:0.03f})'
),
xaxis=dict(
title=dict(
text=axis_label(freqA),
),
tick0=0,
dtick=0.1,
range=[0, max_r + 0.1],
),
yaxis=dict(
title=dict(
text=axis_label(freqB),
),
tick0=0,
dtick=0.1,
range=[0, max_r + 0.1],
),
shapes=[
dict(
type='line',
layer='below',
x0=0,
x1=max_r + 0.1,
y0=0,
y1=max_r + 0.1,
line=dict(
color='gray',
)
)
]
)
)
figs.append(fig)
for param in ('size_at_peak', 'size_at_concave'):
fig = _plot_compare_size(actA, actB, param, parameters, freqA, freqB)
figs.append(fig)
param = 'slope_at_peak'
min_r = min(r_[actA[param], actB[param]])
max_r = max(r_[actA[param], actB[param]])
diff_act = mean(actA[param] - actB[param])
result = ttest_rel(actA[param], actB[param])
regr = linregress(actA['slope_at_peak'], actB['slope_at_peak'])
traces = [
go.Scatter(
x=actA[param],
y=actB[param],
text=actA['subject'],
mode='markers',
marker=dict(
color='black',
),
)
]
fig = go.Figure(
data=traces,
layout=go.Layout(
height=500,
width=500,
title=dict(
text=f'Difference [{freqA[0]}-{freqA[1]}] Hz - [{freqB[0]}-{freqB[1]}] Hz = {diff_act:0.2f}<br />paired t-test, <i>p</i> = {result.pvalue:0.03f}<br />regression slope = {regr.slope:0.3f} <i>p</i> = {regr.pvalue:0.03f}'
),
xaxis=dict(
title=dict(
text=axis_label(freqA),
),
tick0=0,
dtick=0.2,
range=[min_r - 0.1, max_r + 0.1],
),
yaxis=dict(
title=dict(
text=axis_label(freqB),
),
tick0=0,
dtick=0.2,
range=[min_r - 0.1, max_r + 0.1],
scaleanchor="x",
scaleratio=1,
),
shapes=[
dict(
type='line',
layer='below',
x1=-min_r - 0.1,
x0=-max_r - 0.1,
y1=min_r + 0.1,
y0=max_r + 0.1,
line=dict(
color='gray',
)
),
dict(
type='line',
layer='below',
x0=0,
x1=1,
y0=0,
y1=0,
xref='paper',
line=dict(
width=2,
color='gray',
)
),
dict(
type='line',
layer='below',
x0=0,
x1=0,
y0=0,
y1=1,
yref='paper',
line=dict(
width=2,
color='gray',
)
),
]
)
)
figs.append(fig)
return figs
def _plot_compare_size(actA, actB, param, parameters, freqA, freqB):
diff_act = mean(actA[param] - actB[param])
result = ttest_rel(actA[param], actB[param])
traces = [
go.Scatter(
x=actA[param],
y=actB[param],
text=actA['subject'],
mode='markers',
marker=dict(
color='black',
),
)
]
fig = go.Figure(
data=traces,
layout=go.Layout(
height=500,
width=500,
title=dict(
text=f'{param}<br />Difference [{freqA[0]}-{freqA[1]}] Hz - [{freqB[0]}-{freqB[1]}] Hz = {diff_act:0.2f}<br />paired t-test, <i>p</i> = {result.pvalue:0.03f}'
),
xaxis=dict(
title=dict(
text=axis_label(freqA),
),
tick0=0,
dtick=5,
range=[0, parameters['fmri']['at_elec']['kernel_end'] + 1],
),
yaxis=dict(
title=dict(
text=axis_label(freqB),
),
tick0=0,
dtick=5,
range=[0, parameters['fmri']['at_elec']['kernel_end'] + 1],
scaleanchor="x",
scaleratio=1,
),
shapes=[
dict(
type='line',
layer='below',
x0=0,
x1=parameters['fmri']['at_elec']['kernel_end'] + 1,
y0=0,
y1=parameters['fmri']['at_elec']['kernel_end'] + 1,
line=dict(
color='gray',
)
)
]
)
)
return fig
| 29.386957
| 234
| 0.389111
|
02bc8bee53339849ab0e688629574e63ec11531e
| 7,821
|
py
|
Python
|
tests/unit/test_helpers.py
|
KoffieLabs/python-bigquery-sqlalchemy
|
8315ce5a4d9cd3428cbf8bfe8a7db9ae3990a2da
|
[
"MIT"
] | null | null | null |
tests/unit/test_helpers.py
|
KoffieLabs/python-bigquery-sqlalchemy
|
8315ce5a4d9cd3428cbf8bfe8a7db9ae3990a2da
|
[
"MIT"
] | null | null | null |
tests/unit/test_helpers.py
|
KoffieLabs/python-bigquery-sqlalchemy
|
8315ce5a4d9cd3428cbf8bfe8a7db9ae3990a2da
|
[
"MIT"
] | null | null | null |
# Copyright 2021 The sqlalchemy-bigquery Authors
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
import base64
import json
from unittest import mock
import google.auth
import google.auth.credentials
import pytest
from google.oauth2 import service_account
class AnonymousCredentialsWithProject(google.auth.credentials.AnonymousCredentials):
"""Fake credentials to trick isinstance"""
def __init__(self, project):
super().__init__()
self.project_id = project
def with_scopes(self, scopes):
return self
@pytest.fixture(scope="session")
def module_under_test():
from sqlalchemy_bigquery import _helpers
return _helpers
def test_create_bigquery_client_with_credentials_path(monkeypatch, module_under_test):
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_file.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
bqclient = module_under_test.create_bigquery_client(
credentials_path="path/to/key.json",
)
assert bqclient.project == "service-account-project"
def test_create_bigquery_client_with_credentials_path_respects_project(
monkeypatch, module_under_test
):
"""Test that project_id is used, even when there is a default project.
https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48
"""
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_file.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
bqclient = module_under_test.create_bigquery_client(
credentials_path="path/to/key.json",
project_id="connection-url-project",
)
assert bqclient.project == "connection-url-project"
def test_create_bigquery_client_with_credentials_info(monkeypatch, module_under_test):
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_info.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
bqclient = module_under_test.create_bigquery_client(
credentials_info={
"type": "service_account",
"project_id": "service-account-project",
},
)
assert bqclient.project == "service-account-project"
def test_create_bigquery_client_with_credentials_info_respects_project(
monkeypatch, module_under_test
):
"""Test that project_id is used, even when there is a default project.
https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48
"""
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_info.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
bqclient = module_under_test.create_bigquery_client(
credentials_info={
"type": "service_account",
"project_id": "service-account-project",
},
project_id="connection-url-project",
)
assert bqclient.project == "connection-url-project"
def test_create_bigquery_client_with_credentials_base64(monkeypatch, module_under_test):
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_info.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
credentials_info = (
{"type": "service_account", "project_id": "service-account-project"},
)
credentials_base64 = base64.b64encode(json.dumps(credentials_info).encode())
bqclient = module_under_test.create_bigquery_client(
credentials_base64=credentials_base64
)
assert bqclient.project == "service-account-project"
def test_create_bigquery_client_with_credentials_base64_respects_project(
monkeypatch, module_under_test
):
"""Test that project_id is used, even when there is a default project.
https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48
"""
mock_service_account = mock.create_autospec(service_account.Credentials)
mock_service_account.from_service_account_info.return_value = (
AnonymousCredentialsWithProject("service-account-project")
)
monkeypatch.setattr(service_account, "Credentials", mock_service_account)
credentials_info = (
{"type": "service_account", "project_id": "service-account-project"},
)
credentials_base64 = base64.b64encode(json.dumps(credentials_info).encode())
bqclient = module_under_test.create_bigquery_client(
credentials_base64=credentials_base64,
project_id="connection-url-project",
)
assert bqclient.project == "connection-url-project"
def test_create_bigquery_client_with_default_credentials(
monkeypatch, module_under_test
):
def mock_default_credentials(*args, **kwargs):
return (google.auth.credentials.AnonymousCredentials(), "default-project")
monkeypatch.setattr(google.auth, "default", mock_default_credentials)
bqclient = module_under_test.create_bigquery_client()
assert bqclient.project == "default-project"
def test_create_bigquery_client_with_default_credentials_respects_project(
monkeypatch, module_under_test
):
"""Test that project_id is used, even when there is a default project.
https://github.com/googleapis/python-bigquery-sqlalchemy/issues/48
"""
def mock_default_credentials(*args, **kwargs):
return (google.auth.credentials.AnonymousCredentials(), "default-project")
monkeypatch.setattr(google.auth, "default", mock_default_credentials)
bqclient = module_under_test.create_bigquery_client(
project_id="connection-url-project",
)
assert bqclient.project == "connection-url-project"
def test_substitute_string_re(module_under_test):
import re
foo_to_baz = module_under_test.substitute_string_re_method(
"foo", flags=re.IGNORECASE, repl="baz"
)
assert (
foo_to_baz(object(), "some foo and FOO is good") == "some baz and baz is good"
)
def test_substitute_re_func(module_under_test):
import re
@module_under_test.substitute_re_method("foo", re.IGNORECASE)
def Foo_to_bar(self, m):
return "bar"
assert (
Foo_to_bar(object(), "some foo and FOO is good") == "some bar and bar is good"
)
@module_under_test.substitute_re_method("foo")
def foo_to_bar(self, m, x="bar"):
return x
assert (
foo_to_bar(object(), "some foo and FOO is good") == "some bar and FOO is good"
)
assert (
foo_to_bar(object(), "some foo and FOO is good", "hah")
== "some hah and FOO is good"
)
assert (
foo_to_bar(object(), "some foo and FOO is good", x="hah")
== "some hah and FOO is good"
)
assert foo_to_bar.__name__ == "foo_to_bar"
def test_substitute_re_func_self(module_under_test):
class Replacer:
def __init__(self, x):
self.x = x
@module_under_test.substitute_re_method("foo")
def foo_to_bar(self, m):
return self.x
assert (
Replacer("hah").foo_to_bar("some foo and FOO is good")
== "some hah and FOO is good"
)
| 31.663968
| 88
| 0.730981
|
70fd69cdbb001947e6a818aefd0615cc8d8f0913
| 427
|
py
|
Python
|
backend/ideapros_llc_synvio_31951/wsgi.py
|
crowdbotics-apps/ideapros-llc-synvio-31951
|
32b384f6a4975f8fc953bc7391d2843b719d2d13
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/ideapros_llc_synvio_31951/wsgi.py
|
crowdbotics-apps/ideapros-llc-synvio-31951
|
32b384f6a4975f8fc953bc7391d2843b719d2d13
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/ideapros_llc_synvio_31951/wsgi.py
|
crowdbotics-apps/ideapros-llc-synvio-31951
|
32b384f6a4975f8fc953bc7391d2843b719d2d13
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for ideapros_llc_synvio_31951 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ideapros_llc_synvio_31951.settings')
application = get_wsgi_application()
| 25.117647
| 85
| 0.803279
|
419646ec36f749816197ea4ffb69c76f75f3479a
| 423
|
py
|
Python
|
examples_to_use/all_features_example.py
|
Eegii2233/tradingbot
|
a4d8b048f90cef74136c12f9c45862ab390c6324
|
[
"MIT"
] | null | null | null |
examples_to_use/all_features_example.py
|
Eegii2233/tradingbot
|
a4d8b048f90cef74136c12f9c45862ab390c6324
|
[
"MIT"
] | null | null | null |
examples_to_use/all_features_example.py
|
Eegii2233/tradingbot
|
a4d8b048f90cef74136c12f9c45862ab390c6324
|
[
"MIT"
] | null | null | null |
"""This is an example adding all technical analysis features implemented in
this library.
"""
import pandas as pd
import ta
# Load data
df = pd.read_csv('../data/datas.csv', sep=',')
# Clean nan values
df = ta.utils.dropna(df)
print(df.columns)
# Add all ta features filling nans values
df = ta.add_all_ta_features(df, "Open", "High", "Low", "Close", "Volume_BTC", fillna=True)
print(df.columns)
print(len(df.columns))
| 21.15
| 90
| 0.711584
|
bf5ea75c0a67371f89833cc073f335bc92883e04
| 1,064
|
py
|
Python
|
escher/cmd/esdsl.py
|
msh5/escher
|
004e90e6df70a15a00331a5c27bf2aef77608d30
|
[
"Apache-2.0"
] | null | null | null |
escher/cmd/esdsl.py
|
msh5/escher
|
004e90e6df70a15a00331a5c27bf2aef77608d30
|
[
"Apache-2.0"
] | null | null | null |
escher/cmd/esdsl.py
|
msh5/escher
|
004e90e6df70a15a00331a5c27bf2aef77608d30
|
[
"Apache-2.0"
] | null | null | null |
'''
Define subcommands for 'esdsl'.
'''
import json
import click
from escher import __version__
@click.group()
@click.option('--pretty', '-p', is_flag=True)
@click.option('--indent', '-n', type=int)
@click.version_option(version=__version__, message='escher %(version)s')
@click.pass_context
def cli(ctx, pretty, indent):
if pretty:
indent = 4
if indent:
ctx.obj['indent_size'] = indent
def echo_query(ctx, query):
indent_size = None
if 'indent_size' in ctx.obj:
indent_size = ctx.obj['indent_size']
resp = json.dumps(query, indent=indent_size)
click.echo(resp)
@click.command()
@click.option('--boost', '-b', type=float)
@click.pass_context
def match_all(ctx, boost):
query = {'match_all': {}}
if boost:
query['match_all']['boost'] = boost
echo_query(ctx, query)
@click.command()
@click.pass_context
def match_none(ctx):
echo_query(ctx, {"match_none": {}})
cli.add_command(match_all, name="match-all")
cli.add_command(match_none, name="match-none")
def main():
cli(obj={})
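# Usage sketch (illustrative only), assuming the group is exposed as an
# `esdsl` console script:
#   $ esdsl --pretty match-all --boost 1.2
#   {
#       "match_all": {
#           "boost": 1.2
#       }
#   }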
| 20.075472
| 72
| 0.660714
|
b2f96b92c7b232f7cc5c3c43391785bc19b7dc74
| 8,050
|
py
|
Python
|
test/extensions/problem.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 395
|
2019-10-04T09:37:52.000Z
|
2022-03-29T18:00:56.000Z
|
test/extensions/problem.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 78
|
2019-10-11T18:56:43.000Z
|
2022-03-23T01:49:54.000Z
|
test/extensions/problem.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 50
|
2019-10-03T16:31:10.000Z
|
2022-03-15T19:36:14.000Z
|
"""Convert problem settings."""
import copy
from test.core.derivatives.utils import get_available_devices
from typing import Any, Iterator, List, Tuple
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from backpack import extend
from backpack.utils.subsampling import subsample
def make_test_problems(settings):
"""Creates test problems from settings.
Args:
settings (list[dict]): raw settings of the problems
Returns:
list[ExtensionTestProblem]
"""
problem_dicts = []
for setting in settings:
setting = add_missing_defaults(setting)
devices = setting["device"]
for dev in devices:
problem = copy.deepcopy(setting)
problem["device"] = dev
problem_dicts.append(problem)
return [ExtensionsTestProblem(**p) for p in problem_dicts]
def add_missing_defaults(setting):
"""Create full settings from setting.
Args:
setting (dict): configuration dictionary
Returns:
dict: full settings.
Raises:
ValueError: if no proper settings
"""
required = ["module_fn", "input_fn", "loss_function_fn", "target_fn"]
optional = {
"id_prefix": "",
"seed": 0,
"device": get_available_devices(),
}
for req in required:
if req not in setting.keys():
raise ValueError("Missing configuration entry for {}".format(req))
for opt, default in optional.items():
if opt not in setting.keys():
setting[opt] = default
for s in setting.keys():
if s not in required and s not in optional.keys():
raise ValueError("Unknown config: {}".format(s))
return setting
class ExtensionsTestProblem:
"""Class providing functions and parameters."""
def __init__(
self,
input_fn,
module_fn,
loss_function_fn,
target_fn,
device,
seed,
id_prefix,
):
"""Collection of information required to test extensions.
Args:
input_fn (callable): Function returning the network input.
module_fn (callable): Function returning the network.
loss_function_fn (callable): Function returning the loss module.
target_fn (callable): Function returning the labels.
device (torch.device): Device to run on.
seed (int): Random seed.
id_prefix (str): Extra string added to test id.
"""
self.module_fn = module_fn
self.input_fn = input_fn
self.loss_function_fn = loss_function_fn
self.target_fn = target_fn
self.device = device
self.seed = seed
self.id_prefix = id_prefix
def set_up(self):
"""Set up problem from settings."""
torch.manual_seed(self.seed)
self.model = self.module_fn().to(self.device)
self.input = self.input_fn().to(self.device)
self.target = self.target_fn().to(self.device)
self.loss_function = self.loss_function_fn().to(self.device)
def tear_down(self):
"""Delete all variables after problem."""
del self.model, self.input, self.target, self.loss_function
def make_id(self):
"""Needs to function without call to `set_up`.
Returns:
str: id of problem
"""
prefix = (self.id_prefix + "-") if self.id_prefix != "" else ""
return (
prefix
+ "dev={}-in={}-model={}-loss={}".format(
self.device,
tuple(self.input_fn().shape),
self.module_fn(),
self.loss_function_fn(),
).replace(" ", "")
)
def forward_pass(
self, subsampling: List[int] = None
) -> Tuple[Tensor, Tensor, Tensor]:
"""Do a forward pass. Return input, output, and parameters.
If sub-sampling is None, the forward pass is calculated on the whole batch.
Args:
subsampling: Indices of selected samples. Default: ``None`` (all samples).
Returns:
input, output, and loss of the forward pass
"""
input = self.input.clone()
target = self.target.clone()
if subsampling is not None:
batch_axis = 0
input = subsample(self.input, dim=batch_axis, subsampling=subsampling)
target = subsample(self.target, dim=batch_axis, subsampling=subsampling)
output = self.model(input)
loss = self.loss_function(output, target)
return input, output, loss
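    # Added note: e.g. forward_pass(subsampling=[0, 2]) evaluates the model
    # and the loss only on samples 0 and 2 of the mini-batch.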
def extend(self):
"""Extend module of problem."""
self.model = extend(self.model)
self.loss_function = extend(self.loss_function)
@staticmethod
def __get_reduction_factor(loss: Tensor, unreduced_loss: Tensor) -> float:
"""Return the factor used to reduce the individual losses.
Args:
loss: Reduced loss.
unreduced_loss: Unreduced loss.
Returns:
Reduction factor.
Raises:
RuntimeError: if either mean or sum cannot be determined
"""
mean_loss = unreduced_loss.flatten().mean()
sum_loss = unreduced_loss.flatten().sum()
if torch.allclose(mean_loss, sum_loss):
if unreduced_loss.numel() == 1 and torch.allclose(loss, sum_loss):
factor = 1.0
else:
                raise RuntimeError(
                    "Cannot determine reduction factor. "
                    "Results from 'mean' and 'sum' reduction are identical. "
                    f"'mean': {mean_loss}, 'sum': {sum_loss}"
                )
elif torch.allclose(loss, mean_loss):
factor = 1.0 / unreduced_loss.numel()
elif torch.allclose(loss, sum_loss):
factor = 1.0
else:
            raise RuntimeError(
                "Reductions 'mean' or 'sum' do not match the loss. "
                f"'mean': {mean_loss}, 'sum': {sum_loss}, loss: {loss}"
            )
return factor
def trainable_parameters(self) -> Iterator[Parameter]:
"""Yield the model's trainable parameters.
Yields:
Model parameter with gradients enabled.
"""
for p in self.model.parameters():
if p.requires_grad:
yield p
def collect_data(self, savefield: str) -> List[Any]:
"""Collect BackPACK attributes from trainable parameters.
Args:
savefield: Attribute name.
Returns:
List of attributes saved under the trainable model parameters.
Raises:
RuntimeError: If a non-differentiable parameter with the attribute is
encountered.
"""
data = []
for p in self.model.parameters():
if p.requires_grad:
data.append(getattr(p, savefield))
else:
if hasattr(p, savefield):
raise RuntimeError(
f"Found non-differentiable parameter with attribute '{savefield}'."
)
return data
def get_batch_size(self) -> int:
"""Return the mini-batch size.
Returns:
Mini-batch size.
"""
return self.input.shape[0]
def compute_reduction_factor(self) -> float:
"""Compute loss function's reduction factor for aggregating per-sample losses.
For instance, if ``reduction='mean'`` is used, then the reduction factor
is ``1 / N`` where ``N`` is the batch size. With ``reduction='sum'``, it
is ``1``.
Returns:
Reduction factor
"""
_, _, loss = self.forward_pass()
batch_size = self.get_batch_size()
loss_list = torch.zeros(batch_size, device=self.device)
for n in range(batch_size):
_, _, loss_n = self.forward_pass(subsampling=[n])
loss_list[n] = loss_n
return self.__get_reduction_factor(loss, loss_list)
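# Worked example (added for clarity): with N = 4 per-sample losses
# [1.0, 2.0, 3.0, 4.0], a loss module using reduction='mean' returns 2.5 and
# the factor is 1 / 4 = 0.25; with reduction='sum' it returns 10.0 and the
# factor is 1.0.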
| 30.263158
| 91
| 0.583602
|
c5fc5d8dae93db4be1e46dc34f7eda19605e83f4
| 2,029
|
py
|
Python
|
lib/python2.7/site-packages/rest_framework/utils/html.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/rest_framework/utils/html.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/rest_framework/utils/html.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | null | null | null |
"""
Helpers for dealing with HTML input.
"""
import re
from django.utils.datastructures import MultiValueDict
def is_html_input(dictionary):
# MultiDict type datastructures are used to represent HTML form input,
# which may have more than one value for each key.
return hasattr(dictionary, 'getlist')
def parse_html_list(dictionary, prefix=''):
"""
    Used to support list values in HTML forms.
Supports lists of primitives and/or dictionaries.
* List of primitives.
{
'[0]': 'abc',
'[1]': 'def',
'[2]': 'hij'
}
-->
[
'abc',
'def',
'hij'
]
* List of dictionaries.
{
'[0]foo': 'abc',
'[0]bar': 'def',
'[1]foo': 'hij',
'[1]bar': 'klm',
}
-->
[
{'foo': 'abc', 'bar': 'def'},
{'foo': 'hij', 'bar': 'klm'}
]
"""
ret = {}
regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
for field, value in dictionary.items():
match = regex.match(field)
if not match:
continue
index, key = match.groups()
index = int(index)
if not key:
ret[index] = value
elif isinstance(ret.get(index), dict):
ret[index][key] = value
else:
ret[index] = MultiValueDict({key: [value]})
return [ret[item] for item in sorted(ret.keys())]
def parse_html_dict(dictionary, prefix=''):
"""
Used to support dictionary values in HTML forms.
{
'profile.username': 'example',
'profile.email': 'example@example.com',
}
-->
{
'profile': {
'username': 'example',
'email': 'example@example.com'
}
}
"""
ret = {}
regex = re.compile(r'^%s\.(.+)$' % re.escape(prefix))
for field, value in dictionary.items():
match = regex.match(field)
if not match:
continue
key = match.groups()[0]
ret[key] = value
return ret
| 22.544444
| 74
| 0.507639
|
8a2f1ee05fe4956c711aed0db46d604055ce6e34
| 2,962
|
py
|
Python
|
legged_gym/envs/__init__.py
|
chengxuxin/legged_gym_isaac
|
458b4f70dc748867d1200827285a309d56e58ca7
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T10:06:56.000Z
|
2022-01-06T10:06:56.000Z
|
legged_gym/envs/__init__.py
|
chengxuxin/legged_gym_isaac
|
458b4f70dc748867d1200827285a309d56e58ca7
|
[
"BSD-3-Clause"
] | null | null | null |
legged_gym/envs/__init__.py
|
chengxuxin/legged_gym_isaac
|
458b4f70dc748867d1200827285a309d56e58ca7
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T12:13:16.000Z
|
2022-03-21T12:13:16.000Z
|
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, Nikita Rudin
from legged_gym import LEGGED_GYM_ROOT_DIR, LEGGED_GYM_ENVS_DIR
from legged_gym.envs.a1.a1_config import A1RoughCfg, A1RoughCfgPPO
from legged_gym.envs.a1.a1_rma_config import A1RMARoughCfg, A1RMARoughCfgPPO
from .base.legged_robot import LeggedRobot
from .base.a1_rma import A1RMA
from .anymal_c.anymal import Anymal
from .anymal_c.mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg, AnymalCRoughCfgPPO
from .anymal_c.flat.anymal_c_flat_config import AnymalCFlatCfg, AnymalCFlatCfgPPO
from .anymal_b.anymal_b_config import AnymalBRoughCfg, AnymalBRoughCfgPPO
from .cassie.cassie import Cassie
from .cassie.cassie_config import CassieRoughCfg, CassieRoughCfgPPO
from .a1.a1_config import A1RoughCfg, A1RoughCfgPPO
import os
from legged_gym.utils.task_registry import task_registry
task_registry.register( "anymal_c_rough", Anymal, AnymalCRoughCfg(), AnymalCRoughCfgPPO() )
task_registry.register( "anymal_c_flat", Anymal, AnymalCFlatCfg(), AnymalCFlatCfgPPO() )
task_registry.register( "anymal_b", Anymal, AnymalBRoughCfg(), AnymalBRoughCfgPPO() )
task_registry.register( "a1", LeggedRobot, A1RoughCfg(), A1RoughCfgPPO() )
task_registry.register( "cassie", Cassie, CassieRoughCfg(), CassieRoughCfgPPO() )
task_registry.register( "a1_rma", A1RMA, A1RMARoughCfg(), A1RMARoughCfgPPO() )
| 51.964912
| 98
| 0.810601
|
0252bcbae9e7673ea5ca952863c6d7e8e38b9e8b
| 4,734
|
py
|
Python
|
src/api-engine/api/common/enums.py
|
neocameback/cello-adv
|
2844d580c5d25923b06391bd266c45b3c0972588
|
[
"Apache-2.0"
] | null | null | null |
src/api-engine/api/common/enums.py
|
neocameback/cello-adv
|
2844d580c5d25923b06391bd266c45b3c0972588
|
[
"Apache-2.0"
] | null | null | null |
src/api-engine/api/common/enums.py
|
neocameback/cello-adv
|
2844d580c5d25923b06391bd266c45b3c0972588
|
[
"Apache-2.0"
] | null | null | null |
#
# SPDX-License-Identifier: Apache-2.0
#
from enum import Enum, unique, EnumMeta
import inspect
def separate_upper_class(class_name):
x = ""
i = 0
for c in class_name:
if c.isupper() and not class_name[i - 1].isupper():
x += " %s" % c.lower()
else:
x += c
i += 1
return "_".join(x.strip().split(" "))
class ExtraEnum(Enum):
@classmethod
def get_info(cls, title="", list_str=False):
str_info = """
"""
str_info += title
if list_str:
for name, member in cls.__members__.items():
str_info += """
%s
""" % (
name.lower().replace("_", "."),
)
else:
for name, member in cls.__members__.items():
str_info += """
%s: %s
""" % (
member.value,
name,
)
return str_info
@classmethod
def to_choices(cls, string_as_value=False, separate_class_name=False):
if string_as_value:
choices = [
(name.lower().replace("_", "."), name)
for name, member in cls.__members__.items()
]
elif separate_class_name:
choices = [
(separate_upper_class(name), name)
for name, member in cls.__members__.items()
]
else:
choices = [
(member.value, name)
for name, member in cls.__members__.items()
]
return choices
@classmethod
def values(cls):
return list(map(lambda c: c.value, cls.__members__.values()))
@classmethod
def names(cls):
return [name.lower() for name, _ in cls.__members__.items()]
@unique
class HostStatus(ExtraEnum):
Inactive = 0
Active = 1
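# Added illustration of the ExtraEnum helpers applied to HostStatus:
#   HostStatus.to_choices()                     -> [(0, 'Inactive'), (1, 'Active')]
#   HostStatus.to_choices(string_as_value=True) -> [('inactive', 'Inactive'), ('active', 'Active')]
#   HostStatus.values()                         -> [0, 1]
#   HostStatus.names()                          -> ['inactive', 'active']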
@unique
class NetworkStatus(ExtraEnum):
Stopped = 0
Running = 1
Error = 2
@unique
class LogLevel(ExtraEnum):
Info = 0
Warning = 1
Debug = 2
Error = 3
Critical = 4
@unique
class Operation(ExtraEnum):
Start = 0
Stop = 1
Restart = 2
@unique
class NetworkOperation(ExtraEnum):
Join = 0
Leave = 1
@unique
class HostType(ExtraEnum):
Docker = 0
Kubernetes = 1
@unique
class ChannelType(ExtraEnum):
System = 0
Normal = 1
@unique
class NetworkType(ExtraEnum):
Fabric = 0
@unique
class FabricVersions(ExtraEnum):
V1_4 = "1.4"
V1_5 = "1.5"
@unique
class FabricNodeType(ExtraEnum):
Ca = 0
Orderer = 1
Peer = 2
@unique
class NodeStatus(ExtraEnum):
Deploying = 0
Running = 1
Stopped = 2
Deleting = 3
Error = 4
@unique
class NetworkCreateType(ExtraEnum):
New = 0
Import = 1
@unique
class K8SCredentialType(ExtraEnum):
CertKey = 0
Config = 1
UsernamePassword = 2
@unique
class ConsensusPlugin(ExtraEnum):
Solo = 0
Kafka = 1
@unique
class UserRole(ExtraEnum):
Administrator = 0
Operator = 1
User = 2
@unique
class FileType(ExtraEnum):
Certificate = 0
class EnumWithDisplayMeta(EnumMeta):
def __new__(mcs, name, bases, attrs):
display_strings = attrs.get("DisplayStrings")
if display_strings is not None and inspect.isclass(display_strings):
del attrs["DisplayStrings"]
if hasattr(attrs, "_member_names"):
attrs._member_names.remove("DisplayStrings")
obj = super().__new__(mcs, name, bases, attrs)
for m in obj:
m.display_string = getattr(display_strings, m.name, None)
return obj
@unique
class ErrorCode(Enum, metaclass=EnumWithDisplayMeta):
UnknownError = 20000
ValidationError = 20001
ParseError = 20002
ResourceInUse = 20003
ResourceExists = 20004
ResourceNotFound = 20005
PermissionError = 20006
CustomError = 20007
NoResource = 20008
class DisplayStrings:
UnknownError = "Unknown Error."
ValidationError = "Validation parameter error."
ParseError = "Parse error."
        ResourceInUse = "Resource is in use."
ResourceExists = "Resource already exists."
        ResourceNotFound = "Requested resource not found."
PermissionError = "Permission Error."
CustomError = "Custom Error."
        NoResource = "No available resource."
@classmethod
def get_info(cls):
error_code_str = """
Error Codes:
"""
for name, member in cls.__members__.items():
error_code_str += """
%s: %s
""" % (
member.value,
member.display_string,
)
return error_code_str
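# Added illustration: EnumWithDisplayMeta attaches the DisplayStrings entries
# to the members, so e.g.
#   ErrorCode.ValidationError.value          -> 20001
#   ErrorCode.ValidationError.display_string -> "Validation parameter error."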
| 20.582609
| 76
| 0.566117
|
1326229757a5cc6a9adcc349d258451f4a562414
| 7,392
|
py
|
Python
|
CNNectome/training/synapse_template.py
|
davidackerman/CNNectome
|
2815f96f40f5df66cc0a17e33db1353b1d0b6351
|
[
"BSD-2-Clause"
] | 4
|
2019-06-21T18:06:22.000Z
|
2021-11-29T08:28:46.000Z
|
CNNectome/training/synapse_template.py
|
GenevieveBuckley/CNNectome
|
bde8528ed5adc0a4aefca3b19ecc4c2144f2cbcc
|
[
"BSD-2-Clause"
] | 4
|
2018-12-18T19:31:04.000Z
|
2022-01-10T16:06:45.000Z
|
CNNectome/training/synapse_template.py
|
GenevieveBuckley/CNNectome
|
bde8528ed5adc0a4aefca3b19ecc4c2144f2cbcc
|
[
"BSD-2-Clause"
] | 9
|
2018-03-22T18:17:57.000Z
|
2022-03-24T01:17:21.000Z
|
import logging
from CNNectome.utils.label import *
from CNNectome.networks.mk_dist_unet_with_labels import make_net
from CNNectome.networks import unet_class
from CNNectome.training.anisotropic.train_dist_syn import train_until
from CNNectome.inference.single_block_inference import single_block_inference
from gunpowder import Coordinate
import json
import numpy as np
import argparse
logging.basicConfig(level=logging.INFO)
# running parameters
max_iteration = 500000
cache_size = 5
num_workers = 10
# voxel size parameters
voxel_size = Coordinate((40,4,4))
# network parameters
steps_train = 4
steps_inference = 11
padding = "valid"
constant_upsample = True
trans_equivariant = True
feature_widths_down = [12, 12 * 6, 12 * 6 ** 2, 12 * 6 ** 3]
feature_widths_up = [12 , 12 * 6, 12 * 6 ** 2, 12 * 6 ** 3]
downsampling_factors = [(1, 3, 3), (1, 3, 3) , (3, 3, 3)]
kernel_sizes_down = [
[(1, 3, 3), (1, 3, 3)],
[(1, 3, 3), (1, 3, 3)],
[(3, 3, 3), (3, 3, 3)],
[(3, 3, 3), (3, 3, 3)]
]
kernel_sizes_up = [
[(1, 3, 3), (1, 3, 3)],
[(1, 3, 3), (1, 3, 3)],
[(3, 3, 3), (3, 3, 3)]
]
# additional network parameters for upsampling network
#upsample_factor = tuple(voxel_size_input/voxel_size)
#final_kernel_size = [(3,) * 3, (3,) * 3]
#final_feature_width = 12 * 6
# groundtruth source parameters
cremi_dir = "/groups/saalfeld/saalfeldlab/larissa/data/cremi-2019/"
samples = ["A", "B", "C"]
n5_filename_format = "sample_{0:}.n5"
csv_filename_format = "sample_{0:}_clefts_to_seg.csv"
filter_comments_pre = ["ER", "autapse"]
filter_comments_post = ["apposition", "ER", "autapse"]
include_cleft = False
# training parameters
loss_name = "loss_total"
aug_mode = "deluxe"
# groundtruth construction parameters
min_masked_voxels = 17561.
dt_scaling_factor = 50
labels = list()
labels.append(Label("clefts", None, thr=0., scale_loss=True, separate_labelset=True))
labels.append(Label("pre", None, thr=-0.5, scale_loss=True, separate_labelset=True))
labels.append(Label("post", None, thr=-0.5, scale_loss=True, separate_labelset=True))
def build_net(steps=steps_inference, mode="inference"):
unet = unet_class.UNet(
feature_widths_down,
feature_widths_up,
downsampling_factors,
kernel_sizes_down,
kernel_sizes_up,
padding=padding,
constant_upsample=constant_upsample,
trans_equivariant=trans_equivariant,
input_voxel_size=voxel_size,
input_fov=voxel_size
)
net, input_shape, output_shape = make_net(unet, labels, steps, loss_name=loss_name, mode=mode)
logging.info(
"Built {0:} with input shape {1:} and output_shape {2:}".format(
net, input_shape, output_shape
)
)
return net, input_shape, output_shape
def test_memory_consumption(steps=steps_train, mode="train"):
from CNNectome.utils.test_memory_consumption import Test
net, input_shape, output_shape = build_net(steps, mode=mode)
with open("net_io_names.json", "r") as f:
net_io_names = json.load(f)
input_arrays = dict()
requested_outputs = dict()
input_arrays[net_io_names["raw"]] = np.random.random(
input_shape.astype(np.int)
).astype(np.float32)
for l in labels:
if mode.lower() == "train" or mode.lower() == "training":
input_arrays[net_io_names["mask"]] = np.random.randint(0, 1, output_shape).astype(
np.float32)
input_arrays[net_io_names["gt_" + l.labelname]] = np.random.random(
output_shape
).astype(np.float32)
if l.scale_loss or l.scale_key is not None:
input_arrays[net_io_names["w_" + l.labelname]] = np.random.random(
output_shape
).astype(np.float32)
input_arrays[net_io_names["mask_" + l.labelname]] = np.random.random(
output_shape
).astype(np.float32)
requested_outputs[l.labelname] = net_io_names[l.labelname]
t = Test(
net,
requested_outputs,
net_io_names["optimizer"],
net_io_names["loss_total"],
mode=mode,
)
t.setup()
for it in range(100):
t.train_step(input_arrays, iteration=it + 1)
def train(steps=steps_train):
net_name, input_shape, output_shape = build_net(steps=steps, mode="train")
train_until(
max_iteration,
cremi_dir,
samples,
n5_filename_format,
csv_filename_format,
filter_comments_pre,
filter_comments_post,
labels,
net_name,
input_shape,
output_shape,
loss_name,
aug_mode,
dt_scaling_factor=dt_scaling_factor,
cache_size=cache_size,
num_workers=num_workers,
min_masked_voxels=min_masked_voxels,
voxel_size=voxel_size
)
def inference(steps=steps_inference):
net_name, input_shape, output_shape = build_net(steps=steps, mode="inference")
outputs = [l.labelname for l in labels]
single_block_inference(net_name, input_shape, output_shape, ckpt, outputs, input_file, coordinate=coordinate,
output_file=output_file, voxel_size_input=voxel_size, voxel_size_output=voxel_size)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Build, train or test memory consumption for a U-Net")
parser.add_argument("script", type=str, help="Pick script that should be run",
choices=["train", "build", "test_mem", "inference"], default="train")
parser.add_argument("--mode", type=str, help="for build and test_mem specify whether to run for inference or "
"training network", choices=["training", "inference"])
parser.add_argument("--ckpt", type=str, help="checkpoint file to use for inference")
parser.add_argument("--input_file", type=str, help="n5 file for input data to predict from")
parser.add_argument("--output_file", type=str, help="n5 file to write inference output to", default="prediction.n5")
parser.add_argument("--coordinate", type=int, help="upper left coordinate of block to predict from (input)",
default=(0, 0, 0), nargs='+')
args = parser.parse_args()
mode = args.mode
ckpt = args.ckpt
input_file = args.input_file
output_file = args.output_file
coordinate = tuple(args.coordinate)
if args.script == "train":
if mode == "inference":
raise ValueError("script train should not be run with mode inference")
else:
mode = "training"
elif args.script == "inference":
if mode == "training":
raise ValueError("script inference should not be run with mode training")
else:
mode = "inference"
assert ckpt is not None and input_file is not None, \
"ckpt and input_file need to be given to run inference"
if mode == "inference":
steps = steps_inference
elif mode == "training":
steps = steps_train
else:
raise ValueError("mode needs to be given to run script {0:}".format(args.script))
if args.script == "train":
train(steps)
elif args.script == "build":
build_net(steps, mode)
elif args.script == "test_mem":
test_memory_consumption(steps, mode)
elif args.script == "inference":
inference(steps)
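# Hedged usage sketch (added; file paths are placeholders): the entry points
# above would typically be invoked as
#   python synapse_template.py train
#   python synapse_template.py build --mode inference
#   python synapse_template.py inference --ckpt <ckpt> --input_file <data.n5>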
| 35.2
| 120
| 0.653815
|
49f053373997236fc5dd6a3881b55e38e7361d4b
| 3,200
|
py
|
Python
|
src/modules/build_model.py
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null |
src/modules/build_model.py
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null |
src/modules/build_model.py
|
amirgholipour/mlops_project
|
ddd88886c4d887b756c79973ea5524660a2c82e1
|
[
"BSD-3-Clause"
] | null | null | null |
from tensorflow.keras.layers import Bidirectional, Dense, Input, LSTM, Embedding
from tensorflow.keras.models import Sequential
class BuildModel():
'''
    Build a bidirectional LSTM model with TensorFlow/Keras
    ----------
    Returns
    -------
    self.model:
        Deep learning-based model
'''
def __init__(self, WORD_INDEX, EMWEIGHTS, EMBEDDING_DIM= 50,MAX_SEQUENCE_LENGTH= 348, LOSS='categorical_crossentropy',OPTIMIZER='rmsprop',METRICS=['acc'],NUM_CLASSES=11,DROP_OUT_RATE =.4,PRE_WEIGHT_FLAG = False):
self.weights = [EMWEIGHTS]
self.input_length = MAX_SEQUENCE_LENGTH
self.embeding_dim = EMBEDDING_DIM
self.loss = LOSS
self.optimizer = OPTIMIZER
self.metrics = METRICS
self.model = []
self.num_classes = NUM_CLASSES
self.drate = DROP_OUT_RATE
self.pre_weight_flag = PRE_WEIGHT_FLAG
self.word_index = WORD_INDEX
def DefineModelWithoutGLOVE(self):
'''
Define the model
----------
Returns
-------
'''
#Bidirectional LSTM
self.model = Sequential()
self.model.add(Embedding(len(self.word_index) + 1,
self.embeding_dim,
input_length=self.input_length ,
trainable=True))
self.model.add(Bidirectional(LSTM(100, dropout = self.drate, return_sequences=True)))
self.model.add(Bidirectional(LSTM(256, dropout = self.drate)))
self.model.add(Dense(self.num_classes,activation='sigmoid'))
# return self.final_set,self.labels, self.enc, self.ohe,self.encoding_flag
def DefineModelWithGLOVE(self):
'''
Define the model
----------
Returns
-------
'''
#Bidirectional LSTM
self.model = Sequential()
self.model.add(Embedding(len(self.word_index) + 1,
self.embeding_dim,
weights=self.weights,
input_length=self.input_length ,
trainable=True))
self.model.add(Bidirectional(LSTM(100, dropout = self.drate, return_sequences=True)))
self.model.add(Bidirectional(LSTM(256, dropout = self.drate)))
self.model.add(Dense(self.num_classes,activation='sigmoid'))
return self.model
# return self.final_set,self.labels, self.enc, self.ohe,self.encoding_flag
def CompileModel(self):
'''
Compile the model
----------
Returns
-------
'''
self.model.compile(loss=self.loss,optimizer=self.optimizer,metrics=self.metrics)
# return self.model
def SetupModel(self):
'''
Build the model
----------
Returns
-------
'''
        if self.pre_weight_flag:
self.DefineModelWithGLOVE()
else:
self.DefineModelWithoutGLOVE()
self.CompileModel()
self.model.summary()
return self.model
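# Hedged usage sketch (added; word_index, embedding_matrix and the training
# arrays are placeholders assumed to come from a tokenizer / GloVe loader
# elsewhere, they are not defined in this module):
#
#   builder = BuildModel(WORD_INDEX=word_index,
#                        EMWEIGHTS=embedding_matrix,   # shape (len(word_index) + 1, 50)
#                        NUM_CLASSES=11,
#                        PRE_WEIGHT_FLAG=True)         # use the pretrained-weights branch
#   model = builder.SetupModel()
#   model.fit(X_train, y_train, epochs=5)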
| 32.323232
| 216
| 0.545313
|
bf958144b7e9bf913e5c3ab050f3bd75d9783ab0
| 11,905
|
py
|
Python
|
tools/test.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
tools/test.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
tools/test.py
|
zzx0226/mmocr
|
50354895244339a392b4f1af5a35963883923cca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
from mmocr.utils import revert_sync_batchnorm
from mmocr.models import build_detector
from mmocr.datasets import build_dataloader, build_dataset
from mmocr.apis.utils import (disable_text_recog_aug_test, replace_image_to_tensor)
from mmdet.core import encode_mask_results
from mmdet.apis import multi_gpu_test
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model)
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.image import tensor2imgs
from mmcv.cnn import fuse_conv_bn
from mmcv import Config, DictAction
import torch
import mmcv
import warnings
import os.path as osp
import argparse
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def parse_args():
parser = argparse.ArgumentParser(description='MMOCR test (and eval) a model.')
parser.add_argument('config', help='Test config file path.')
parser.add_argument('checkpoint', help='Checkpoint file.')
parser.add_argument('--out', help='Output result file in pickle format.')
parser.add_argument('--fuse-conv-bn',
action='store_true',
                        help='Whether to fuse conv and bn, which will slightly increase '
                        'the inference speed.')
parser.add_argument('--format-only',
action='store_true',
                        help='Format the output results without performing evaluation. It is '
'useful when you want to format the results to a specific format and '
'submit them to the test server.')
parser.add_argument('--eval',
type=str,
nargs='+',
                        help='The evaluation metrics, which depend on the dataset, e.g., '
                        '"bbox", "seg", "proposal" for COCO, and "mAP", "recall" for '
'PASCAL VOC.')
parser.add_argument('--show', action='store_true', help='Show results.')
parser.add_argument('--show-dir', help='Directory where the output images will be saved.')
parser.add_argument('--show-score-thr', type=float, default=0.3, help='Score threshold (default: 0.3).')
parser.add_argument('--gpu-collect', action='store_true', help='Whether to use gpu to collect results.')
parser.add_argument('--tmpdir',
help='The tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified.')
parser.add_argument('--cfg-options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into the config file. If the value '
'to be overwritten is a list, it should be of the form of either '
'key="[a,b]" or key=a,b. The argument also allows nested list/tuple '
'values, e.g. key="[(a,b),(c,d)]". Note that the quotation marks '
'are necessary and that no white space is allowed.')
parser.add_argument('--options',
nargs='+',
action=DictAction,
help='Custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument('--eval-options',
nargs='+',
action=DictAction,
help='Custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function.')
parser.add_argument('--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='Options for job launcher.')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError('--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options.')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options.')
args.eval_options = args.options
return args
def single_gpu_test(model, data_loader, show=False, out_dir=None, is_kie=False, show_score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
if is_kie:
img_tensor = data['img'].data[0]
if img_tensor.shape[0] != 1:
                    raise KeyError('Visualizing KIE outputs in batches is '
                                   'currently not supported.')
gt_bboxes = data['gt_bboxes'].data[0]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
for i, img in enumerate(imgs):
h, w, _ = img_metas[i]['img_shape']
img_show = img[:h, :w, :]
if out_dir:
out_file = osp.join(out_dir, img_metas[i]['ori_filename'])
else:
out_file = None
model.module.show_result(img_show, result[i], gt_bboxes[i], show=show, out_file=out_file)
else:
if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
img_tensor = data['img'][0]
else:
img_tensor = data['img'][0].data[0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(img_show, result[i], show=show, out_file=out_file, score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
return results
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show
or args.show_dir), ('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir".')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified.')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if cfg.model.get('pretrained'):
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = (cfg.data.get('test_dataloader', {})).get('samples_per_gpu', cfg.data.get('samples_per_gpu', 1))
if samples_per_gpu > 1:
cfg = disable_text_recog_aug_test(cfg)
cfg = replace_image_to_tensor(cfg)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
# step 1: give default values and override (if exist) from cfg.data
loader_cfg = {
**dict(seed=cfg.get('seed'), drop_last=False, dist=distributed),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
pin_memory=False,
)),
**dict((k, cfg.data[k]) for k in [
'workers_per_gpu',
'seed',
'prefetch_num',
'pin_memory',
'persistent_workers',
] if k in cfg.data)
}
test_loader_cfg = {
**loader_cfg,
**dict(shuffle=False, drop_last=False),
**cfg.data.get('test_dataloader', {}),
**dict(samples_per_gpu=samples_per_gpu)
}
data_loader = build_dataloader(dataset, **test_loader_cfg)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
model = revert_sync_batchnorm(model)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
is_kie = cfg.model.type in ['SDMGR']
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, is_kie, args.show_score_thr)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-coded way to remove EvalHook args
for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
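# Hedged usage sketch (added; the config, checkpoint and metric names are
# placeholders): a single-GPU evaluation with this script would be launched
# roughly as
#   python tools/test.py <config.py> <checkpoint.pth> --eval <metric>
# optionally adding --show-dir <dir> to dump visualizations, or --out res.pkl
# to save the raw results.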
| 45.26616
| 122
| 0.5895
|
545f2db199c4dbf4d15192cf010bb12c20536760
| 219
|
py
|
Python
|
modules/persons/domain/repository/__init__.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 6
|
2020-08-09T23:41:08.000Z
|
2021-03-16T22:05:40.000Z
|
modules/persons/domain/repository/__init__.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-02T02:59:38.000Z
|
2020-10-02T02:59:38.000Z
|
modules/persons/domain/repository/__init__.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 2
|
2021-03-16T22:05:43.000Z
|
2021-04-30T06:35:25.000Z
|
from .person_repository import PersonRepository
from .address_repository import AddressRepository
from .phone_repository import PhoneRepository
__all__ = ('PersonRepository', 'AddressRepository', 'PhoneRepository', )
| 31.285714
| 72
| 0.840183
|
147fcbb5afc9dcb280311afc7f451957ea6fc2ce
| 11,509
|
py
|
Python
|
tests/parse_name_test.py
|
live-clones/pybtex
|
892adfc6c2c55c5d7e3ff6b019b2d1fecdbf6405
|
[
"MIT"
] | 4
|
2021-06-03T17:34:53.000Z
|
2022-03-01T11:16:20.000Z
|
tests/parse_name_test.py
|
live-clones/pybtex
|
892adfc6c2c55c5d7e3ff6b019b2d1fecdbf6405
|
[
"MIT"
] | 1
|
2021-03-25T23:00:28.000Z
|
2021-03-25T23:00:28.000Z
|
tests/parse_name_test.py
|
live-clones/pybtex
|
892adfc6c2c55c5d7e3ff6b019b2d1fecdbf6405
|
[
"MIT"
] | 1
|
2019-11-14T05:07:33.000Z
|
2019-11-14T05:07:33.000Z
|
# vim:fileencoding=utf-8
# Copyright (c) 2006-2021 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import pytest
from pybtex import errors
from pybtex.database import InvalidNameString, Person
# name, (bibtex_first, prelast, last, lineage)
# as parsed by the bibtex program itself
sample_names = [
('A. E. Siegman', (['A.', 'E.'], [], ['Siegman'], []), None),
('A. G. W. Cameron', (['A.', 'G.', 'W.'], [], ['Cameron'], []), None),
('A. Hoenig', (['A.'], [], ['Hoenig'], []), None),
('A. J. Van Haagen', (['A.', 'J.', 'Van'], [], ['Haagen'], []), None),
('A. S. Berdnikov', (['A.', 'S.'], [], ['Berdnikov'], []), None),
('A. Trevorrow', (['A.'], [], ['Trevorrow'], []), None),
('Adam H. Lewenberg', (['Adam', 'H.'], [], ['Lewenberg'], []), None),
('Addison-Wesley Publishing Company',
(['Addison-Wesley', 'Publishing'], [], ['Company'], []), None),
('Advogato (Raph Levien)', (['Advogato', '(Raph'], [], ['Levien)'], []), None),
('Andrea de Leeuw van Weenen',
(['Andrea'], ['de', 'Leeuw', 'van'], ['Weenen'], []), None),
('Andreas Geyer-Schulz', (['Andreas'], [], ['Geyer-Schulz'], []), None),
("Andr{\\'e} Heck", (["Andr{\\'e}"], [], ['Heck'], []), None),
('Anne Br{\\"u}ggemann-Klein', (['Anne'], [], ['Br{\\"u}ggemann-Klein'], []), None),
('Anonymous', ([], [], ['Anonymous'], []), None),
('B. Beeton', (['B.'], [], ['Beeton'], []), None),
('B. Hamilton Kelly', (['B.', 'Hamilton'], [], ['Kelly'], []), None),
('B. V. Venkata Krishna Sastry',
(['B.', 'V.', 'Venkata', 'Krishna'], [], ['Sastry'], []), None),
('Benedict L{\\o}fstedt', (['Benedict'], [], ['L{\\o}fstedt'], []), None),
('Bogus{\\l}aw Jackowski', (['Bogus{\\l}aw'], [], ['Jackowski'], []), None),
('Christina A. L.\\ Thiele',
# (['Christina', 'A.', 'L.\\'], [], ['Thiele'], []), None),
(['Christina', 'A.', 'L.'], [], ['Thiele'], []), None), # BibTeX incompatible: treat "\ " as a space
("D. Men'shikov", (['D.'], [], ["Men'shikov"], []), None),
("Darko \\v{Z}ubrini{\\'c}", (['Darko'], [], ["\\v{Z}ubrini{\\'c}"], []), None),
("Dunja Mladeni{\\'c}", (['Dunja'], [], ["Mladeni{\\'c}"], []), None),
('Edwin V. {Bell, II}', (['Edwin', 'V.'], [], ['{Bell, II}'], []), None),
('Frank G. {Bennett, Jr.}', (['Frank', 'G.'], [], ['{Bennett, Jr.}'], []), None),
("Fr{\\'e}d{\\'e}ric Boulanger",
(["Fr{\\'e}d{\\'e}ric"], [], ['Boulanger'], []), None),
('Ford, Jr., Henry', (['Henry'], [], ['Ford'], ['Jr.']), None),
('mr Ford, Jr., Henry', (['Henry'], ['mr'], ['Ford'], ['Jr.']), None),
('Fukui Rei', (['Fukui'], [], ['Rei'], []), None),
('G. Gr{\\"a}tzer', (['G.'], [], ['Gr{\\"a}tzer'], []), None),
('George Gr{\\"a}tzer', (['George'], [], ['Gr{\\"a}tzer'], []), None),
('Georgia K. M. Tobin', (['Georgia', 'K.', 'M.'], [], ['Tobin'], []), None),
('Gilbert van den Dobbelsteen',
(['Gilbert'], ['van', 'den'], ['Dobbelsteen'], []), None),
('Gy{\\"o}ngyi Bujdos{\\\'o}', (['Gy{\\"o}ngyi'], [], ["Bujdos{\\'o}"], []), None),
('Helmut J{\\"u}rgensen', (['Helmut'], [], ['J{\\"u}rgensen'], []), None),
('Herbert Vo{\\ss}', (['Herbert'], [], ['Vo{\\ss}'], []), None),
("H{\\'a}n Th{\\^e}\\llap{\\raise 0.5ex\\hbox{\\'{\\relax}}} Th{\\'a}nh",
(["H{\\'a}n", "Th{\\^e}\\llap{\\raise 0.5ex\\hbox{\\'{\\relax}}}"],
[],
["Th{\\'a}nh"],
[]), None),
("H{\\`a}n Th\\^e\\llap{\\raise0.5ex\\hbox{\\'{\\relax}}} Th{\\`a}nh",
(['H{\\`a}n', "Th\\^e\\llap{\\raise0.5ex\\hbox{\\'{\\relax}}}"],
[],
['Th{\\`a}nh'],
[]), None),
("J. Vesel{\\'y}", (['J.'], [], ["Vesel{\\'y}"], []), None),
("Javier Rodr\\'{\\i}guez Laguna",
(['Javier', "Rodr\\'{\\i}guez"], [], ['Laguna'], []), None),
("Ji\\v{r}\\'{\\i} Vesel{\\'y}",
(["Ji\\v{r}\\'{\\i}"], [], ["Vesel{\\'y}"], []), None),
("Ji\\v{r}\\'{\\i} Zlatu{\\v{s}}ka",
(["Ji\\v{r}\\'{\\i}"], [], ['Zlatu{\\v{s}}ka'], []), None),
("Ji\\v{r}{\\'\\i} Vesel{\\'y}",
(["Ji\\v{r}{\\'\\i}"], [], ["Vesel{\\'y}"], []), None),
("Ji\\v{r}{\\'{\\i}}Zlatu{\\v{s}}ka",
([], [], ["Ji\\v{r}{\\'{\\i}}Zlatu{\\v{s}}ka"], []), None),
('Jim Hef{}feron', (['Jim'], [], ['Hef{}feron'], []), None),
('J{\\"o}rg Knappen', (['J{\\"o}rg'], [], ['Knappen'], []), None),
('J{\\"o}rgen L. Pind', (['J{\\"o}rgen', 'L.'], [], ['Pind'], []), None),
("J{\\'e}r\\^ome Laurens", (["J{\\'e}r\\^ome"], [], ['Laurens'], []), None),
('J{{\\"o}}rg Knappen', (['J{{\\"o}}rg'], [], ['Knappen'], []), None),
('K. Anil Kumar', (['K.', 'Anil'], [], ['Kumar'], []), None),
("Karel Hor{\\'a}k", (['Karel'], [], ["Hor{\\'a}k"], []), None),
("Karel P\\'{\\i}{\\v{s}}ka", (['Karel'], [], ["P\\'{\\i}{\\v{s}}ka"], []), None),
("Karel P{\\'\\i}{\\v{s}}ka", (['Karel'], [], ["P{\\'\\i}{\\v{s}}ka"], []), None),
("Karel Skoup\\'{y}", (['Karel'], [], ["Skoup\\'{y}"], []), None),
("Karel Skoup{\\'y}", (['Karel'], [], ["Skoup{\\'y}"], []), None),
('Kent McPherson', (['Kent'], [], ['McPherson'], []), None),
('Klaus H{\\"o}ppner', (['Klaus'], [], ['H{\\"o}ppner'], []), None),
('Lars Hellstr{\\"o}m', (['Lars'], [], ['Hellstr{\\"o}m'], []), None),
('Laura Elizabeth Jackson',
(['Laura', 'Elizabeth'], [], ['Jackson'], []), None),
("M. D{\\'{\\i}}az", (['M.'], [], ["D{\\'{\\i}}az"], []), None),
('M/iche/al /O Searc/oid', (['M/iche/al', '/O'], [], ['Searc/oid'], []), None),
("Marek Ry{\\'c}ko", (['Marek'], [], ["Ry{\\'c}ko"], []), None),
('Marina Yu. Nikulina', (['Marina', 'Yu.'], [], ['Nikulina'], []), None),
("Max D{\\'{\\i}}az", (['Max'], [], ["D{\\'{\\i}}az"], []), None),
('Merry Obrecht Sawdey', (['Merry', 'Obrecht'], [], ['Sawdey'], []), None),
("Miroslava Mis{\\'a}kov{\\'a}",
(['Miroslava'], [], ["Mis{\\'a}kov{\\'a}"], []), None),
('N. A. F. M. Poppelier', (['N.', 'A.', 'F.', 'M.'], [], ['Poppelier'], []), None),
('Nico A. F. M. Poppelier',
(['Nico', 'A.', 'F.', 'M.'], [], ['Poppelier'], []), None),
('Onofrio de Bari', (['Onofrio'], ['de'], ['Bari'], []), None),
("Pablo Rosell-Gonz{\\'a}lez", (['Pablo'], [], ["Rosell-Gonz{\\'a}lez"], []), None),
('Paco La Bruna', (['Paco', 'La'], [], ['Bruna'], []), None),
('Paul Franchi-Zannettacci',
(['Paul'], [], ['Franchi-Zannettacci'], []), None),
('Pavel \\v{S}eve\\v{c}ek', (['Pavel'], [], ['\\v{S}eve\\v{c}ek'], []), None),
('Petr Ol{\\v{s}}ak', (['Petr'], [], ['Ol{\\v{s}}ak'], []), None),
("Petr Ol{\\v{s}}{\\'a}k", (['Petr'], [], ["Ol{\\v{s}}{\\'a}k"], []), None),
('Primo\\v{z} Peterlin', (['Primo\\v{z}'], [], ['Peterlin'], []), None),
('Prof. Alban Grimm', (['Prof.', 'Alban'], [], ['Grimm'], []), None),
("P{\\'e}ter Husz{\\'a}r", (["P{\\'e}ter"], [], ["Husz{\\'a}r"], []), None),
("P{\\'e}ter Szab{\\'o}", (["P{\\'e}ter"], [], ["Szab{\\'o}"], []), None),
('Rafa{\\l}\\.Zbikowski', ([], [], ['Rafa{\\l}\\.Zbikowski'], []), None),
('Rainer Sch{\\"o}pf', (['Rainer'], [], ['Sch{\\"o}pf'], []), None),
('T. L. (Frank) Pappas', (['T.', 'L.', '(Frank)'], [], ['Pappas'], []), None),
('TUG 2004 conference', (['TUG', '2004'], [], ['conference'], []), None),
# von part with BibTeX special characters
('TUG {\\sltt DVI} Driver Standards Committee',
(['TUG', '{\\sltt DVI}', 'Driver', 'Standards'], [], ['Committee'], []), None),
('TUG {\\sltt xDVIx} Driver Standards Committee',
(['TUG'], ['{\\sltt xDVIx}'], ['Driver', 'Standards', 'Committee'], []), None),
('University of M{\\"u}nster',
(['University'], ['of'], ['M{\\"u}nster'], []), None),
('Walter van der Laan', (['Walter'], ['van', 'der'], ['Laan'], []), None),
('Wendy G. McKay', (['Wendy', 'G.'], [], ['McKay'], []), None),
('Wendy McKay', (['Wendy'], [], ['McKay'], []), None),
('W{\\l}odek Bzyl', (['W{\\l}odek'], [], ['Bzyl'], []), None),
('\\LaTeX Project Team', (['\\LaTeX', 'Project'], [], ['Team'], []), None),
('\\rlap{Lutz Birkhahn}', ([], [], ['\\rlap{Lutz Birkhahn}'], []), None),
('{Jim Hef{}feron}', ([], [], ['{Jim Hef{}feron}'], []), None),
('{Kristoffer H\\o{}gsbro Rose}',
([], [], ['{Kristoffer H\\o{}gsbro Rose}'], []), None),
('{TUG} {Working} {Group} on a {\\TeX} {Directory} {Structure}',
(['{TUG}', '{Working}', '{Group}'],
['on', 'a'],
['{\\TeX}', '{Directory}', '{Structure}'],
[]), None),
('{The \\TUB{} Team}', ([], [], ['{The \\TUB{} Team}'], []), None),
('{\\LaTeX} project team', (['{\\LaTeX}'], ['project'], ['team'], []), None),
('{\\NTG{} \\TeX{} future working group}',
([], [], ['{\\NTG{} \\TeX{} future working group}'], []), None),
('{{\\LaTeX\\,3} Project Team}',
([], [], ['{{\\LaTeX\\,3} Project Team}'], []), None),
('Johansen Kyle, Derik Mamania M.',
(['Derik', 'Mamania', 'M.'], [], ['Johansen', 'Kyle'], []), None),
("Johannes Adam Ferdinand Alois Josef Maria Marko d'Aviano "
'Pius von und zu Liechtenstein',
(['Johannes', 'Adam', 'Ferdinand', 'Alois', 'Josef', 'Maria', 'Marko'],
["d'Aviano", 'Pius', 'von', 'und', 'zu'], ['Liechtenstein'],[]), None),
(r'Brand\~{a}o, F', (['F'], [], [r'Brand\~{a}o'], []), None),
# but BibTeX parses it like this:
# (r'Brand\~{a}o, F', (['F'], [], ['Brand\\', '{a}o'], []), None),
# incorrectly formatted name strings below
# too many commas
('Chong, B. M., Specia, L., & Mitkov, R.',
(['Specia', 'L.', '&', 'Mitkov', 'R.'], [], ['Chong'], ['B.', 'M.']),
[InvalidNameString('Chong, B. M., Specia, L., & Mitkov, R.')]
),
# too many commas, sloppy whitespace
('LeCun, Y. , Bottou, L . , Bengio, Y. , Haffner , P',
(['Bottou', 'L', '.', 'Bengio', 'Y.', 'Haffner', 'P'], [], ['LeCun'], ['Y.']),
[InvalidNameString('LeCun, Y. , Bottou, L . , Bengio, Y. , Haffner , P')]),
]
@pytest.mark.parametrize(
["name", "correct_result", "expected_errors"],
sample_names
)
def test_parse_name(name, correct_result, expected_errors):
if expected_errors is None:
expected_errors = []
with errors.capture() as captured_errors:
person = Person(name)
result = (person.bibtex_first_names, person.prelast_names, person.last_names, person.lineage_names)
assert result == correct_result
assert captured_errors == expected_errors
| 54.804762
| 105
| 0.464593
|
547ac58bbc49ba3310f0776ef064e5e531795e2b
| 1,895
|
py
|
Python
|
zunclient/tests/unit/base.py
|
wanghuiict/python-zunclient
|
42d1a73eb01bda846f4815fee0cdefcdde1da081
|
[
"Apache-2.0"
] | 15
|
2016-07-15T02:22:19.000Z
|
2019-02-26T08:55:10.000Z
|
zunclient/tests/unit/base.py
|
wanghuiict/python-zunclient
|
42d1a73eb01bda846f4815fee0cdefcdde1da081
|
[
"Apache-2.0"
] | 2
|
2019-03-01T20:42:38.000Z
|
2020-09-26T06:06:03.000Z
|
zunclient/tests/unit/base.py
|
wanghuiict/python-zunclient
|
42d1a73eb01bda846f4815fee0cdefcdde1da081
|
[
"Apache-2.0"
] | 6
|
2017-01-17T12:29:58.000Z
|
2021-07-12T21:29:34.000Z
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import testtools
_TRUE_VALUES = ('true', '1', 'yes')
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger())
| 35.754717
| 76
| 0.692876
|
5462e5d12825f3b3c154e99c672395cbfe1d911e
| 7,275
|
py
|
Python
|
gen/hooks.py
|
stmobo/robotpy-ctre
|
7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb
|
[
"Apache-2.0"
] | null | null | null |
gen/hooks.py
|
stmobo/robotpy-ctre
|
7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb
|
[
"Apache-2.0"
] | null | null | null |
gen/hooks.py
|
stmobo/robotpy-ctre
|
7ef83ae6f6bc0d5ef793dfb6e96ac5d641320aeb
|
[
"Apache-2.0"
] | null | null | null |
import re
_annotations = {
'short': 'int',
'int': 'int',
'uint32_t': 'int',
'double': 'float',
'char': 'str',
'bool': 'bool',
'ctre::phoenix::ErrorCode': 'int',
}
def _gen_check(pname, ptype):
# TODO: This does checks on normal types, but if you pass a ctypes value
# in then this does not check those properly.
if ptype == 'bool':
return 'isinstance(%s, bool)' % pname
elif ptype in ['float', 'double']:
return 'isinstance(%s, (int, float))' % pname
#elif ptype is C.c_char:
# return 'isinstance(%s, bytes) and len(%s) == 1' % (pname, pname)
#elif ptype is C.c_wchar:
# return 'isinstance(%s, str) and len(%s) == 1' % (pname, pname)
#elif ptype is C.c_char_p:
# return "%s is None or isinstance(%s, bytes) or getattr(%s, '_type_') is _C.c_char" % (pname, pname, pname)
#elif ptype is C.c_wchar_p:
# return '%s is None or isinstance(%s, bytes)' % (pname, pname)
elif ptype in ['int', 'long']:
return 'isinstance(%s, int)' % pname
#elif ptype in [C.c_byte, C.c_int8]:
# return 'isinstance(%s, int) and %s < %d and %s > -%d' % (pname, pname, 1<<7, pname, 1<<7)
elif ptype in ['short', 'int16_t']:
return 'isinstance(%s, int) and %s < %d and %s > -%d' % (pname, pname, 1<<15, pname, 1<<15)
elif ptype == 'int32_t':
return 'isinstance(%s, int) and %s < %d and %s > -%d' % (pname, pname, 1<<31, pname, 1<<31)
elif ptype == 'int64_t':
return 'isinstance(%s, int) and %s < %d and %s > -%d' % (pname, pname, 1<<63, pname, 1<<63)
elif ptype == 'size_t':
return 'isinstance(%s, int)' % (pname)
elif ptype == 'uint8_t':
return 'isinstance(%s, int) and %s < %d and %s >= 0' % (pname, pname, 1<<8, pname)
elif ptype == 'uint16_t':
return 'isinstance(%s, int) and %s < %d and %s >= 0' % (pname, pname, 1<<16, pname)
elif ptype == 'uint32_t':
return 'isinstance(%s, int) and %s < %d and %s >= 0' % (pname, pname, 1<<32, pname)
elif ptype == 'uint64_t':
return 'isinstance(%s, int) and %s < %d and %s >= 0' % (pname, pname, 1<<64, pname)
elif ptype is None:
return '%s is None' % pname
else:
# TODO: do validation here
#return 'isinstance(%s, %s)' % (pname, type(ptype).__name__)
return None
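# Added examples (traced from the branches above) of the assertion strings
# this helper emits for the generated simulation checks:
#   _gen_check("value", "double")  -> "isinstance(value, (int, float))"
#   _gen_check("value", "uint8_t") -> "isinstance(value, int) and value < 256 and value >= 0"
#   _gen_check("value", "int")     -> "isinstance(value, int)"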
def _to_annotation(ctypename):
return _annotations[ctypename]
def header_hook(header, data):
'''Called for each header'''
# fix enum names
for e in header.enums:
ename = e['name'].split('_')[0] + '_'
for v in e['values']:
name = v['name']
if name.startswith(ename):
name = name[len(ename):]
if name == 'None':
name = 'None_'
try:
int(name[0])
name = v['name'][0] + name
except ValueError:
pass
v['x_name'] = name
def function_hook(fn, data):
'''Called for each function in the header'''
# only output functions if a module name is defined
if 'module_name' not in data:
return
# Mangle the name appropriately
m = re.match(r'c_%s_(.*)' % data['module_name'], fn['name'])
if not m:
raise Exception("Unexpected fn %s" % fn['name'])
    # Python exposed function name converted to camelCase
x_name = m.group(1)
x_name = x_name[0].lower() + x_name[1:]
x_in_params = []
x_out_params = []
x_rets = []
# Simulation assertions
x_param_checks = []
x_return_checks = []
param_offset = 0 if x_name.startswith('create') else 1
for i, p in enumerate(fn['parameters'][param_offset:]):
if p['name'] == '':
p['name'] = 'param%s' % i
p['x_type'] = p['raw_type']
p['x_callname'] = p['name']
# Python annotations for sim
p['x_pyann_type'] = _to_annotation(p['raw_type'])
p['x_pyann'] = '%(name)s: %(x_pyann_type)s' % p
if p['pointer']:
p['x_callname'] = '&%(x_callname)s' % p
x_out_params.append(p)
elif p['array']:
asz = p.get('array_size', 0)
if asz:
p['x_pyann_type'] = 'typing.List[%s]' % _to_annotation(p['raw_type'])
p['x_type'] = 'std::array<%s, %s>' % (p['x_type'], asz)
p['x_callname'] = '%(x_callname)s.data()' % p
else:
# it's a vector
pass
x_out_params.append(p)
else:
chk = _gen_check(p['name'], p['raw_type'])
if chk:
x_param_checks.append('assert %s' % chk)
x_in_params.append(p)
p['x_decl'] = '%s %s' % (p['x_type'], p['name'])
x_callstart = ''
x_callend = ''
x_wrap_return = ''
# Return all out parameters
x_rets.extend(x_out_params)
# if the function has out parameters and if the return value
# is an error code, suppress the error code. This matches the Java
# APIs, and the user can retrieve the error code from getLastError if
# they really care
if (not len(x_rets) or fn['returns'] != 'ctre::phoenix::ErrorCode') and \
fn['returns'] != 'void':
x_callstart = 'auto __ret ='
x_rets.insert(0, dict(
name='__ret',
x_type=fn['returns'],
x_pyann_type=_to_annotation(fn['returns']),
))
# Save some time in the common case -- set the error code to 0
# if there's a single retval and the type is ErrorCode
if len(x_rets) == 1 and fn['returns'] == 'ctre::phoenix::ErrorCode':
x_param_checks.append('retval = 0')
if len(x_rets) == 1 and x_rets[0]['x_type'] != 'void':
x_wrap_return = 'return %s;' % x_rets[0]['name']
x_wrap_return_type = x_rets[0]['x_type']
x_pyann_ret = x_rets[0]['x_pyann_type']
elif len(x_rets) > 1:
x_pyann_ret = 'typing.Tuple[%s]' % (
', '.join([p['x_pyann_type'] for p in x_rets]),
)
x_wrap_return = 'return std::make_tuple(%s);' % ','.join([p['name'] for p in x_rets])
x_wrap_return_type = 'std::tuple<%s>' % (', '.join([p['x_type'] for p in x_rets]))
x_return_checks.append('assert isinstance(retval, tuple) and len(retval) == %s' % len(x_rets))
for _p in x_rets:
chk = _gen_check(_p['name'], _p['raw_type'])
if chk:
x_return_checks.append('assert %s' % chk)
else:
x_pyann_ret = 'None'
x_wrap_return_type = 'void'
    # return value checking
# Temporary values to store out parameters in
x_temprefs = ''
if x_out_params:
x_temprefs = \
';'.join([
'%(x_type)s %(name)s' % p for p in x_out_params
]) + ';'
py_self_comma = ', ' if x_in_params else ''
data = data.get('data', {}).get(fn['name'])
if data is None:
# ensure every function is in our yaml
print('WARNING', fn['name'])
data = {}
#assert False, fn['name']
# Rename internal functions
if data.get('internal', False):
x_name = '_' + x_name
name = fn['name']
# lazy :)
fn.update(locals())
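# Added note (hedged): fn.update(locals()) pushes the values computed above
# (x_name, x_in_params, x_rets, x_wrap_return, ...) onto the parsed function
# record so that a downstream code-generation template can reference them;
# which template consumes them is not shown in this file.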
| 33.219178
| 115
| 0.535258
|
1501af3155ef0c8112273428a2ed43c1d91f139c
| 3,236
|
py
|
Python
|
profiles_project/settings.py
|
stuntbadger/profiles-rest-api
|
1e4af74a458c40c9a8e015f7fdfeefa4168af1d6
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
stuntbadger/profiles-rest-api
|
1e4af74a458c40c9a8e015f7fdfeefa4168af1d6
|
[
"MIT"
] | 4
|
2021-03-19T11:24:50.000Z
|
2021-06-10T20:20:41.000Z
|
profiles_project/settings.py
|
stuntbadger/profiles-rest-api
|
1e4af74a458c40c9a8e015f7fdfeefa4168af1d6
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5jbf^s4%ea!jf1e%p%@v__uex^5jco^acqbs!%_$(4%4ht)(5i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.68254
| 91
| 0.699011
|
2157ccd94881b4eadcdfe6a5bdd24a03e5808694
| 19,635
|
py
|
Python
|
main.py
|
majid-farhadloo/SAMCNet_2022
|
10f287890745e15974c9041ec32fc852eb43e802
|
[
"MIT"
] | null | null | null |
main.py
|
majid-farhadloo/SAMCNet_2022
|
10f287890745e15974c9041ec32fc852eb43e802
|
[
"MIT"
] | null | null | null |
main.py
|
majid-farhadloo/SAMCNet_2022
|
10f287890745e15974c9041ec32fc852eb43e802
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from audioop import avg
import sys
# sys.path.append('add your root path') # Root path
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from model import PointNet, DGCNN
from PRNet.UMichPathology.pathology_classifier import PathologyClassifier
from PRNet.UMichPathology.train_process import get_batch_preds
from SAMCNet import SpatialDGCNN
import numpy as np
from torch.utils.data import DataLoader
from util import cal_loss, IOStream
import sklearn.metrics as metrics
import pandas as pd
import time as time
from datetime import datetime
import uuid # For filename hashing
import dataset, transforms
import torchvision.transforms
import pickle
import warnings
warnings.filterwarnings("ignore")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def _init_():
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists('checkpoints/'+args.exp_name):
os.makedirs('checkpoints/'+args.exp_name)
if not os.path.exists('checkpoints/'+args.exp_name+'/'+'models'):
os.makedirs('checkpoints/'+args.exp_name+'/'+'models')
# os.system('cp main.py checkpoints'+'/'+args.exp_name+'/'+'main.py.backup')
# os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
# os.system('cp util.py checkpoints' + '/' + args.exp_name + '/' + 'util.py.backup')
# os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
def train(args, io):
'''
See __main__ for args parser
'''
args.transformed_samples = 5
print(f'Using {args.dataset} dataset')
if args.dataset == 'region':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
df = dataset.read_tsv(in_file, use_margins=False)
label_name = 'Pathology'
output_channels = 3
elif args.dataset == 'margin':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
train_path = 'datasets/Interface/train.csv'
df = dataset.read_dataset(in_file, train_path)
label_name = 'Status'
output_channels = 2
elif args.dataset == 'tumor_core':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
train_path = 'datasets/Tumor/train.csv'
df = dataset.read_dataset(in_file, train_path)
label_name = 'Status'
output_channels = 2
elif args.dataset == 'diseases':
in_file = 'datasets/disease.csv'
train_path = 'datasets/Disease/train.csv'
df = dataset.read_dataset(in_file, train_path, dataset='disease')
label_name = 'Status'
output_channels = 2
class_labels = list(df[label_name].cat.categories)
num_classes = len(df.Phenotype.cat.categories)
train_set = dataset.PathologyDataset(dataset=df, label_name=label_name, num_points=args.num_points,
transforms=torchvision.transforms.Compose([transforms.RotateData(n_rotations=30)]), transformed_samples=args.transformed_samples)
sampler = dataset.get_sampler(train_set.dataset, label_name, class_labels, args.transformed_samples)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, collate_fn=dataset.collate_fn_pad, drop_last=True, sampler=sampler)
#Try to load models
if args.model == 'pointnet':
model = PointNet(args, output_channels=output_channels).to(device)
elif args.model == 'dgcnn':
model = DGCNN(args, output_channels=output_channels).to(device)
elif args.model == 'sdgcnn':
model = SpatialDGCNN(args, num_classes=num_classes, output_channels=output_channels).to(device)
elif args.model == 'srnet':
args.min_grid_scale=1
args.max_grid_scale=100
args.grid_scale_count=10
args.neighborhood_distance=50
args.feature_type_count=12
args.lr=0.0001 # the learning rate is different from the others
args.regularization_weight=100
args.diff_weight=1e-3
args.sampling_ratio=1
args.output_classes=output_channels
model = PathologyClassifier(args.feature_type_count, args.grid_scale_count, output_channels).to(device)
else:
raise Exception("Not implemented")
print(str(model))
if args.model != 'srnet': # srnet doesn't support multi-GPUs
model = nn.DataParallel(model)
if args.use_sgd:
opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
else:
opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4) # NOTE: GAT uses lr=0.005, weight_decay=5e-4
scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
criterion = cal_loss
best_train_acc = 0
best_valid_acc = 0
columns = ["train_average", "train_overall", "train_loss", "train_times", "train_pred", "train_true"]
'''Output result to CSV'''
df = pd.DataFrame(columns=columns)
for epoch in range(args.epochs):
train_average, train_losses, train_overall, train_times = [], [], [], []
start_time = time.time()
####################
# Train
####################
train_loss = 0.0
count = 0.0
model.train()
if args.model == 'srnet':
model.epoch = epoch
else:
model.module.epoch = epoch
train_pred = []
train_true = []
valid_loss = 0.0
valid_pred = []
valid_true = []
for data, label, ls in train_loader:
data, label = data.to(device), label.to(device).squeeze()
batch_size = data.size()[1] #
if args.model == 'srnet': # SRNet has unique get-pred calculation as well as customized loss function.
opt.zero_grad()
batch_pred_ys, batch_pred_prs, batch_true_ys = get_batch_preds(model, args, data, label, ls)
pr_diff_sum = 0
for pred_pr in batch_pred_prs:
pr_diff_sum += 1 / (torch.norm(pred_pr, 1) + 1e-5)
paras = torch.cat([x.view(-1) for x in model.parameters()])
regularization = torch.norm(paras, 1) / (paras.shape[0] + 1)
ce = criterion(batch_pred_ys, batch_true_ys)
loss = ce + args.regularization_weight * regularization + args.diff_weight * pr_diff_sum
loss.backward()
opt.step()
preds = batch_pred_ys.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(batch_true_ys.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
else:
data = data.permute(1, 2, 0)
opt.zero_grad()
logits, _ , _ = model(data)
loss = criterion(logits, label)
loss.backward()
opt.step()
preds = logits.max(dim=1)[1]
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
train_acc = metrics.accuracy_score(train_true, train_pred)
scheduler.step()
if train_acc > best_train_acc:
best_train_acc = train_acc
torch.save(model.state_dict(), f'checkpoints/{args.exp_name}/models/{args.exp_name}.t7')
avg_per_class_train_acc = metrics.balanced_accuracy_score(train_true, train_pred)
train_times.append(time.time()-start_time)
train_overall.append(train_acc)
train_average.append(avg_per_class_train_acc)
train_losses.append(train_loss*1.0/count)
io.cprint(f'{datetime.now().strftime("%H:%M:%S")}: Epoch {epoch}')
outstr = 'Train loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
train_loss*1.0/count, train_acc, avg_per_class_train_acc)
io.cprint(outstr)
torch.cuda.empty_cache()
csv = {
'train_average': train_average,
'train_overall': train_overall,
'train_loss': train_losses,
'train_times': train_times,
'train_pred': [train_pred],
'train_true': [train_true]
}
df = df.append(csv, ignore_index=True)
# saving the dataframe
df.to_csv(args.save_train_results + args.exp_name + "_results.csv") #
valid_df = pd.DataFrame()
model.eval()
####################
# Validation
####################
if args.dataset == 'tumor_core':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
sub_path = 'datasets/Tumor/validation.csv'
valid_df = dataset.read_dataset(in_file, sub_path)
label_name = 'Status'
output_channels = 2
num_points = 1024
elif args.dataset == 'margin':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
sub_path = 'datasets/Interface/validation.csv'
valid_df = dataset.read_dataset(in_file, sub_path)
label_name = 'Status'
output_channels = 2
num_points = 1024
elif args.dataset == 'diseases':
in_file = 'datasets/disease.csv'
sub_path = 'datasets/Disease/validation.csv'
valid_df = dataset.read_dataset(in_file, sub_path, dataset="disease")
label_name = 'Status'
output_channels = 2
num_points = 1024
class_labels = list(valid_df[label_name].cat.categories)
num_classes = len(valid_df.Phenotype.cat.categories)
validation_set = dataset.PathologyDataset(dataset=valid_df, label_name=label_name, num_points=args.num_points,
transforms=torchvision.transforms.Compose([transforms.RotateData(n_rotations=10)]), transformed_samples=args.transformed_samples)
sampler = dataset.get_sampler(validation_set.dataset, label_name, class_labels, args.transformed_samples)
valid_loader = torch.utils.data.DataLoader(validation_set, batch_size=args.batch_size, collate_fn=dataset.collate_fn_pad, drop_last=True, sampler=sampler)
for data, label, ls in valid_loader:
data, label = data.to(device), label.to(device).squeeze()
if args.model == 'srnet':
batch_pred_ys, batch_pred_prs, batch_true_ys = get_batch_preds(model, args, data, label, ls)
preds = batch_pred_ys.max(dim=1)[1]
valid_true.append(batch_true_ys.cpu().numpy())
valid_pred.append(preds.detach().cpu().numpy())
else:
data = data.permute(1, 2, 0)
logits, _, _ = model(data)
preds = logits.max(dim=1)[1]
valid_true.append(label.cpu().numpy())
valid_pred.append(preds.detach().cpu().numpy())
valid_true = np.concatenate(valid_true)
valid_pred = np.concatenate(valid_pred)
valid_acc = metrics.accuracy_score(valid_true, valid_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(valid_true, valid_pred)
outstr = 'Validation :: valid acc: %.6f, valid avg acc: %.6f'%(valid_acc, avg_per_class_acc)
if valid_acc>best_valid_acc:
best_valid_acc = valid_acc
torch.save(model.state_dict(), f'checkpoints/{args.exp_name}/models/validation_{args.exp_name}.t7')
io.cprint(outstr)
torch.cuda.empty_cache()
def test(args, io):
if args.dataset == 'tumor_core':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
tumor_core_file = 'datasets/Tumor/train.csv'
df = dataset.read_dataset(in_file, tumor_core_file)
# label_name = 'Tumor_Core'
label_name = 'Status'
output_channels = 2
num_points = 1024
elif args.dataset == 'margin':
in_file = 'datasets/BestClassification_July2021_14Samples.tsv'
sub_path = 'datasets/Interface/train.csv'
df = dataset.read_dataset(in_file, sub_path)
label_name = 'Status'
output_channels = 2
num_points = 1024
elif args.dataset == 'disease':
in_file = 'datasets/disease.csv'
sub_path = 'datasets/Disease/train.csv'
df = dataset.read_dataset(in_file, sub_path, dataset='disease')
label_name = 'Status'
output_channels = 2
num_points = 1024
num_classes = len(df.Phenotype.cat.categories)
test_set = dataset.PathologyDataset(dataset=df, label_name=label_name, num_points=args.num_points)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch_size, collate_fn=dataset.collate_fn_pad, drop_last=True)
device = torch.device("cuda" if args.cuda else "cpu")
#Try to load models
model = SpatialDGCNN(args, num_classes=num_classes, output_channels=output_channels).to(device)
model = nn.DataParallel(model)
model.load_state_dict(torch.load(args.model_path))
model = model.eval()
test_acc = 0.0
test_true = []
test_pred = []
attention_1 = {}
attention_2 = {}
attention_3 = {}
attention_4 = {}
conv_layer_5 = {}
point_pairs_add = {}
mini_batch = {}
count = 0
for data, label, _ in test_loader:
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(1, 2, 0)
logits, a_x_4, point_pairs = model(data)
############################################################################
'''
Extracting features for other classifiers
'''
# attention_1[count], attention_2[count], attention_3[count], conv_layer_5[count] = a_x_1, a_x_2, a_x_3, x_5
attention_4[count] = a_x_4
mini_batch[count] = data
point_pairs_add[count] = point_pairs
############################################################################
print(torch.cuda.memory_allocated()/10**9, torch.cuda.memory_reserved()/10**9)
count+=1
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
test_acc = metrics.accuracy_score(test_true, test_pred)
avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
pickle.dump(attention_4, open( args.save_features + "att4_train_2.p", "wb" ))
pickle.dump(point_pairs_add, open( args.save_features + "stack_add_train_2.p", "wb" ))
pickle.dump(mini_batch, open( args.save_features + "mini_batches_2.p", "wb" ))
io.cprint(outstr)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Spatial DGCNN')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset', type=str, default='tumor_core', metavar='N',
choices=['region', 'margin', 'tumor_core', 'diseases'],
help='Dataset to use, [region, margin, tumor_core, diseases]')
parser.add_argument('--model', type=str, default='sdgcnn', metavar='N',
choices=['pointnet', 'dgcnn', 'sdgcnn', 'srnet'],
                        help='Model to use, [pointnet, dgcnn, sdgcnn, srnet]')
parser.add_argument('--train_model', type=str, default='entire_model', metavar='N',
choices=['PE', 'neighbor_att', 'self_att', 'PE_self_att', 'PE_neighbor_att', 'self_att_neighbor_att', 'entire_model'],
                        help='which model variant to train for the ablation studies')
parser.add_argument('--self_neighbor', type=bool, default=True,
help='use self as first neighbor in top k')
parser.add_argument('--neighbor', type=bool, default=True,
                        help='whether to use neighbor attention')
parser.add_argument('--use_pe', type=bool, default=True,
help='use positional encoding')
parser.add_argument('--batch_size', type=int, default=8, metavar='batch_size',
                        help='Size of batch')
parser.add_argument('--test_batch_size', type=int, default=7, metavar='batch_size',
                        help='Size of batch')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train ')
parser.add_argument('--use_sgd', type=bool, default=False,
help='Use SGD')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001, 0.1 if using sgd)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--no_cuda', type=bool, default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--eval', type=bool, default=False,
help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
help='num of points to use')
parser.add_argument('--num_heads', type=int, default=1,
help='num of attn heads to use. Set to 0 for no attn')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout rate')
parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',
help='Dimension of embeddings')
parser.add_argument('--k', type=int, default=10, metavar='N',
help='Num of nearest neighbors to use')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
parser.add_argument('--PE_dim', type=float, default=32, metavar='N',
                        help='output dimension of positional encoding (if use_pe is false this should be 4)')
parser.add_argument('--save_features', type=str, default='', metavar='N',
help='Save extracted features path')
parser.add_argument('--save_train_results', type=str, default='/home/luo00042/M2SSD/SAMCNet/Results/', metavar='N',
help='save training results (e.g., loss, acc, ... )')
args = parser.parse_args()
_init_()
io = IOStream('checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
io.cprint(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(args.seed)
else:
io.cprint('Using CPU')
if not args.eval:
train(args, io)
else:
test(args, io) # Not implemented (yet)
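# Example invocation sketch (assumption, not part of the original script): a typical
# training run on the tumor_core dataset with the SAMCNet (sdgcnn) model would look like
#   python main.py --exp_name exp1 --dataset tumor_core --model sdgcnn --batch_size 8 --epochs 50
# and evaluating a saved checkpoint would add --eval True --model_path <checkpoint path>.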
| 43.633333
| 162
| 0.618691
|
d0bf757cf4de671dd60f99e1bc35ff5497f008ac
| 1,814
|
py
|
Python
|
src/tickets/signals.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
src/tickets/signals.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
src/tickets/signals.py
|
flokli/bornhack-website
|
9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from datetime import timedelta, datetime
from django.db.models import Count
from django.utils import timezone
from events.handler import handle_team_event
def ticket_changed(sender, instance, created, **kwargs):
"""
This signal is called every time a ShopTicket is saved
"""
# only trigger an event when a new ticket is created
if not created:
return
# get ticket stats
from .models import ShopTicket
# TODO: this is nasty, get the prefix some other way
ticket_prefix = "BornHack {}".format(datetime.now().year)
stats = ", ".join(
[
"{}: {}".format(
tickettype["product__name"].replace("{} ".format(ticket_prefix), ""),
tickettype["total"],
)
for tickettype in ShopTicket.objects.filter(
product__name__startswith=ticket_prefix
)
.exclude(product__name__startswith="{} One Day".format(ticket_prefix))
.values("product__name")
.annotate(total=Count("product__name"))
.order_by("-total")
]
)
onedaystats = ShopTicket.objects.filter(
product__name__startswith="{} One Day Ticket".format(ticket_prefix)
).count()
onedaychildstats = ShopTicket.objects.filter(
product__name__startswith="{} One Day Children".format(ticket_prefix)
).count()
# queue the messages
handle_team_event(
eventtype="ticket_created", irc_message="%s sold!" % instance.product.name
)
# limit this one to a length of 200 because IRC is nice
handle_team_event(
eventtype="ticket_created",
irc_message="Totals: {}, 1day: {}, 1day child: {}".format(
stats, onedaystats, onedaychildstats
)[:200],
)
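# Wiring sketch (assumption, not shown in this file): a receiver with this
# (sender, instance, created, **kwargs) signature is normally connected to the
# ShopTicket post_save signal elsewhere, e.g. in the app config:
#   from django.db.models.signals import post_save
#   post_save.connect(ticket_changed, sender="tickets.ShopTicket")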
| 32.392857
| 85
| 0.631753
|
612e7bb3e2f4a914e59a740edcfdadec033cb92b
| 352
|
py
|
Python
|
backend/common/databases/token.py
|
NezzarClp/Tosurnament
|
7aa750f6f663ae0f3f875d5fb50a35b422bd0dd2
|
[
"MIT"
] | 7
|
2018-03-13T12:21:37.000Z
|
2021-07-10T21:47:05.000Z
|
backend/common/databases/token.py
|
NezzarClp/Tosurnament
|
7aa750f6f663ae0f3f875d5fb50a35b422bd0dd2
|
[
"MIT"
] | 44
|
2017-12-28T15:22:27.000Z
|
2021-09-17T20:56:17.000Z
|
backend/common/databases/token.py
|
NezzarClp/Tosurnament
|
7aa750f6f663ae0f3f875d5fb50a35b422bd0dd2
|
[
"MIT"
] | 9
|
2020-07-03T10:07:05.000Z
|
2022-02-27T15:09:11.000Z
|
"""Token class"""
from mysqldb_wrapper import Base, Id
class Token(Base):
"""Token class"""
__tablename__ = "token"
id = Id()
session_token = bytes()
discord_user_id = str()
access_token = str()
token_type = str()
access_token_expiry_date = int()
refresh_token = str()
scope = str()
expiry_date = int()
| 17.6
| 36
| 0.613636
|
0c05465cacb66f28ef26b8f8019b9de6b9eff6a4
| 184
|
py
|
Python
|
EulerProject/euler_25.py
|
bruno-zaccariello/usefull
|
a334a4f6daf79101f48a0a98a665bf64c4354c18
|
[
"Apache-2.0"
] | null | null | null |
EulerProject/euler_25.py
|
bruno-zaccariello/usefull
|
a334a4f6daf79101f48a0a98a665bf64c4354c18
|
[
"Apache-2.0"
] | null | null | null |
EulerProject/euler_25.py
|
bruno-zaccariello/usefull
|
a334a4f6daf79101f48a0a98a665bf64c4354c18
|
[
"Apache-2.0"
] | null | null | null |
from my_generators.fibonacci import fibonacci
def get_result():
count = 0
for fib in fibonacci():
count += 1
if len(str(fib)) >= 1000:
return count
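# Hedged sketch (assumption): my_generators.fibonacci is not shown in this dump, so the
# illustrative generator below stands in for what get_result() iterates over -- an
# infinite stream of Fibonacci terms starting from 1, 1 (Project Euler numbering).
def _example_fibonacci():
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b
# Under that assumption, get_result() returns the 1-based index of the first term
# with at least 1000 digits.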
| 23
| 45
| 0.592391
|
d24ae11cc4bd23d1ada20b8624f78b1e3f05e758
| 9,270
|
py
|
Python
|
ietf/api/views.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | null | null | null |
ietf/api/views.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T08:20:11.000Z
|
2022-03-11T08:21:53.000Z
|
ietf/api/views.py
|
hassanakbar4/ietfdb
|
cabee059092ae776015410640226064331c293b7
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T12:49:27.000Z
|
2021-10-05T12:49:27.000Z
|
# Copyright The IETF Trust 2017-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import json
import pytz
from jwcrypto.jwk import JWK
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from django.views.generic.detail import DetailView
from tastypie.exceptions import BadRequest
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.utils import is_valid_jsonp_callback_value
from tastypie.serializers import Serializer
import debug # pyflakes:ignore
import ietf
from ietf.person.models import Person, Email
from ietf.api import _api_list
from ietf.api.serializer import JsonExportMixin
from ietf.ietfauth.views import send_account_creation_email
from ietf.ietfauth.utils import role_required
from ietf.meeting.models import Meeting
from ietf.stats.models import MeetingRegistration
from ietf.utils.decorators import require_api_key
from ietf.utils.log import log
from ietf.utils.models import DumpInfo
def top_level(request):
available_resources = {}
apitop = reverse('ietf.api.views.top_level')
for name in sorted([ name for name, api in _api_list if len(api._registry) > 0 ]):
available_resources[name] = {
'list_endpoint': '%s/%s/' % (apitop, name),
}
serializer = Serializer()
desired_format = determine_format(request, serializer)
options = {}
if 'text/javascript' in desired_format:
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
serialized = serializer.serialize(available_resources, desired_format, options)
return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
def api_help(request):
key = JWK()
# import just public part here, for display in info page
key.import_from_pem(settings.API_PUBLIC_KEY_PEM)
return render(request, "api/index.html", {'key': key, 'settings':settings, })
@method_decorator((login_required, gzip_page), name='dispatch')
class PersonalInformationExportView(DetailView, JsonExportMixin):
model = Person
def get(self, request):
person = get_object_or_404(self.model, user=request.user)
expand = ['searchrule', 'documentauthor', 'ad_document_set', 'ad_dochistory_set', 'docevent',
'ballotpositiondocevent', 'deletedevent', 'email_set', 'groupevent', 'role', 'rolehistory', 'iprdisclosurebase',
'iprevent', 'liaisonstatementevent', 'whitelisted', 'schedule', 'constraint', 'schedulingevent', 'message',
'sendqueue', 'nominee', 'topicfeedbacklastseen', 'alias', 'email', 'apikeys', 'personevent',
'reviewersettings', 'reviewsecretarysettings', 'unavailableperiod', 'reviewwish',
'nextreviewerinteam', 'reviewrequest', 'meetingregistration', 'submissionevent', 'preapproval',
'user', 'user__communitylist', 'personextresource_set', ]
return self.json_view(request, filter={'id':person.id}, expand=expand)
@method_decorator((csrf_exempt, require_api_key, role_required('Robot')), name='dispatch')
class ApiV2PersonExportView(DetailView, JsonExportMixin):
model = Person
def err(self, code, text):
return HttpResponse(text, status=code, content_type='text/plain')
def post(self, request):
querydict = request.POST.copy()
querydict.pop('apikey', None)
expand = querydict.pop('_expand', [])
if not querydict:
return self.err(400, "No filters provided")
return self.json_view(request, filter=querydict.dict(), expand=expand)
# @require_api_key
# @csrf_exempt
# def person_access_token(request):
# person = get_object_or_404(Person, user=request.user)
#
# if request.method == 'POST':
# client_id = request.POST.get('client_id', None)
# client_secret = request.POST.get('client_secret', None)
# client = get_object_or_404(ClientRecord, client_id=client_id, client_secret=client_secret)
#
# return HttpResponse(json.dumps({
# 'name' : person.plain_name(),
# 'email': person.email().address,
# 'roles': {
# 'chair': list(person.role_set.filter(name='chair', group__state__in=['active', 'bof', 'proposed']).values_list('group__acronym', flat=True)),
# 'secr': list(person.role_set.filter(name='secr', group__state__in=['active', 'bof', 'proposed']).values_list('group__acronym', flat=True)),
# }
# }), content_type='application/json')
# else:
# return HttpResponse(status=405)
@require_api_key
@role_required('Robot')
@csrf_exempt
def api_new_meeting_registration(request):
'''REST API to notify the datatracker about a new meeting registration'''
def err(code, text):
return HttpResponse(text, status=code, content_type='text/plain')
required_fields = [ 'meeting', 'first_name', 'last_name', 'affiliation', 'country_code',
'email', 'reg_type', 'ticket_type', ]
fields = required_fields + []
if request.method == 'POST':
# parameters:
# apikey:
# meeting
# name
# email
# reg_type (In Person, Remote, Hackathon Only)
# ticket_type (full_week, one_day, student)
#
data = {'attended': False, }
missing_fields = []
for item in fields:
value = request.POST.get(item, None)
if value is None and item in required_fields:
missing_fields.append(item)
data[item] = value
log("Meeting registration notification: %s" % json.dumps(data))
if missing_fields:
return err(400, "Missing parameters: %s" % ', '.join(missing_fields))
number = data['meeting']
try:
meeting = Meeting.objects.get(number=number)
except Meeting.DoesNotExist:
return err(400, "Invalid meeting value: '%s'" % (number, ))
email = data['email']
try:
validate_email(email)
except ValidationError:
return err(400, "Invalid email value: '%s'" % (email, ))
if request.POST.get('cancelled', 'false') == 'true':
MeetingRegistration.objects.filter(meeting_id=meeting.pk, email=email).delete()
return HttpResponse('OK', status=200, content_type='text/plain')
else:
object, created = MeetingRegistration.objects.get_or_create(meeting_id=meeting.pk, email=email)
try:
# Set attributes not already in the object
for key in set(data.keys())-set(['attended', 'apikey', 'meeting', 'email',]):
new = data.get(key)
cur = getattr(object, key, None)
if key in ['reg_type', 'ticket_type', ] and new:
# Special handling for multiple reg types
if cur:
                            if new not in cur:
setattr(object, key, cur+' '+new)
else:
setattr(object, key, new)
else:
setattr(object, key, new)
person = Person.objects.filter(email__address=email)
if person.exists():
object.person = person.first()
object.save()
except ValueError as e:
return err(400, "Unexpected POST data: %s" % e)
response = "Accepted, New registration" if created else "Accepted, Updated registration"
if User.objects.filter(username=email).exists() or Email.objects.filter(address=email).exists():
pass
else:
send_account_creation_email(request, email)
response += ", Email sent"
return HttpResponse(response, status=202, content_type='text/plain')
else:
return HttpResponse(status=405)
def version(request):
dumpinfo = DumpInfo.objects.order_by('-date').first()
dumptime = pytz.timezone(dumpinfo.tz).localize(dumpinfo.date).strftime('%Y-%m-%d %H:%M:%S %z') if dumpinfo else None
return HttpResponse(
json.dumps({
'version': ietf.__version__+ietf.__patch__,
'date': ietf.__date__[7:-2],
'dumptime': dumptime,
}),
content_type='application/json',
)
@require_api_key
@csrf_exempt
def author_tools(request):
return HttpResponse(
json.dumps({'success': True}),
content_type='application/json')
| 41.2
| 167
| 0.637109
|
ddf40e4dc447d0d1a25a72aa087b5aff9d27566d
| 1,171
|
py
|
Python
|
app/core/models.py
|
jrgonh/recipe-app-api
|
bbef420372fab173583b5f035c86d714b0453a14
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
jrgonh/recipe-app-api
|
bbef420372fab173583b5f035c86d714b0453a14
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
jrgonh/recipe-app-api
|
bbef420372fab173583b5f035c86d714b0453a14
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager,\
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email and password"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
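# Usage sketch (assumption, not part of the original module): with Django configured,
# the custom manager creates users keyed by email rather than username, e.g.
#   user = User.objects.create_user(email='test@example.com', password='testpass123')
#   admin = User.objects.create_superuser('admin@example.com', 'adminpass123')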
| 30.815789
| 76
| 0.687447
|
8e622bbc00f92ecc32390b6b8feec381fa0a2980
| 1,657
|
py
|
Python
|
vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py
|
fakturk/countbeat
|
bda27b7c8d903ab00d66f749e3491902e180d0f3
|
[
"Apache-2.0"
] | 5
|
2018-05-10T17:46:22.000Z
|
2020-11-19T06:06:27.000Z
|
vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py
|
fakturk/countbeat
|
bda27b7c8d903ab00d66f749e3491902e180d0f3
|
[
"Apache-2.0"
] | 3
|
2018-03-05T08:57:45.000Z
|
2020-05-11T17:19:17.000Z
|
vendor/github.com/elastic/beats/auditbeat/tests/system/test_base.py
|
fakturk/countbeat
|
bda27b7c8d903ab00d66f749e3491902e180d0f3
|
[
"Apache-2.0"
] | 2
|
2020-09-21T03:25:18.000Z
|
2020-11-19T06:06:22.000Z
|
import re
import sys
import unittest
from auditbeat import BaseTest
from elasticsearch import Elasticsearch
from beat.beat import INTEGRATION_TESTS
class Test(BaseTest):
@unittest.skipUnless(re.match("(?i)linux", sys.platform), "os")
def test_start_stop(self):
"""
Auditbeat starts and stops without error.
"""
self.render_config_template(modules=[{
"name": "audit",
"metricsets": ["kernel"],
}])
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("start running"))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
# Ensure all Beater stages are used.
assert self.log_contains("Setup Beat: auditbeat")
assert self.log_contains("auditbeat start running")
assert self.log_contains("auditbeat stopped")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_template(self):
"""
Test that the template can be loaded with `setup --template`
"""
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "audit",
"metricsets": ["file"],
"extras": {
"file.paths": ["file.example"],
},
}],
elasticsearch={"host": self.get_elasticsearch_url()})
exit_code = self.run_beat(extra_args=["setup", "--template"])
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='auditbeat-*', h='name')) > 0
| 33.14
| 70
| 0.601086
|
2b23ac568f339083a8bf24563a11147da62aa427
| 5,196
|
py
|
Python
|
debug_and_testing/ipyleaflet_test.py
|
nathangeology/FORCE_Geolocation_Docs
|
1f7ef447bacdbdd424dce5ba8f37e738462ba372
|
[
"MIT"
] | 3
|
2019-09-18T07:17:29.000Z
|
2019-11-01T16:32:24.000Z
|
debug_and_testing/ipyleaflet_test.py
|
nathangeology/FORCE_Geolocation_Docs
|
1f7ef447bacdbdd424dce5ba8f37e738462ba372
|
[
"MIT"
] | null | null | null |
debug_and_testing/ipyleaflet_test.py
|
nathangeology/FORCE_Geolocation_Docs
|
1f7ef447bacdbdd424dce5ba8f37e738462ba372
|
[
"MIT"
] | null | null | null |
import altair as alt
from data_load_functions import *
# import folium
import geopandas as gpd
# from folium.plugins import MarkerCluster
import numpy as np
from ipyleaflet import Polygon, Map, basemaps, basemap_to_tiles, GeoJSON, MarkerCluster, LayersControl, FullScreenControl
from ipywidgets import RadioButtons
import json
from viz_funcs import *
import pickle
if __name__ == '__main__':
file_path = "joined_dfs.pkl"
with open(file_path, "rb") as f:
final_dataframe = pickle.load(f)
check_cols = list(final_dataframe.columns[-9:])
map_dict = get_prepped_dfs()
structures_map = map_dict['structures']
fields_map = map_dict['fields']
blocks_map = map_dict['blocks']
subareas_map = map_dict['sub_areas']
discoveries_map = map_dict['discoveries']
facilities_map = map_dict['facilities']
m = Map(
layers=(basemap_to_tiles(basemaps.Esri.WorldTopoMap),),
center=(60.5, 5),
zoom=4,
figsize=(10, 15)
)
structure_layer = create_layer(structures_map, 'structures',
label_col='steNameEN', secondary_label_col='document',
layer_type='polygon', filter_on='document', inverse=True, color='lightGray')
structure_layer_docs = create_layer(structures_map, 'structures_docs',
label_col='steNameEN', secondary_label_col='document',
layer_type='polygon', filter_on='document', color='orange')
fields_layer = create_layer(fields_map, 'fields',
label_col='FIELDNAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', inverse=True, color='lightGray')
fields_layer_docs = create_layer(fields_map, 'fields_docs',
label_col='FIELDNAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', color='red')
subareas_layer = create_layer(subareas_map, 'subareas',
label_col='NAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', inverse=True, color='lightGray')
subareas_layer_docs = create_layer(subareas_map, 'subareas_docs',
label_col='NAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', color='blue')
discoveries_layer = create_layer(discoveries_map, 'discoveries',
label_col='DISCNAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', inverse=True, color='lightGray')
discoveries_layer_docs = create_layer(discoveries_map, 'discoveries_docs',
label_col='DISCNAME', secondary_label_col='document',
layer_type='polygon', filter_on='document', color='green')
facilities_layer = create_layer(facilities_map, 'facilities',
label_col='FACNAME', secondary_label_col='document',
layer_type='marker', filter_on='document', inverse=True, color='lightGray')
facilities_layer_docs = create_layer(facilities_map, 'facilities_docs',
label_col='FACNAME', secondary_label_col='document',
layer_type='marker', filter_on='document', color='black')
wells = map_dict['wells']
well_layer = create_layer(wells, 'wells with docs',
label_col='wlbWell', secondary_label_col='document',
layer_type='marker', filter_on='document', color='red')
well_layer_no_docs = create_layer(wells, 'wells with docs',
label_col='wlbWell', secondary_label_col='document',
layer_type='marker', filter_on='document', inverse=True, color='lightGray')
marker_cluster = MarkerCluster(markers=well_layer.layers, name='Wells with Docs')
marker_cluster2 = MarkerCluster(markers=well_layer_no_docs.layers, name='Wells without Docs')
# marker_cluster.add_layer(well_layer)
m.add_layer(structure_layer_docs)
m.add_layer(structure_layer)
m.add_layer(fields_layer_docs)
m.add_layer(fields_layer)
m.add_layer(subareas_layer)
m.add_layer(subareas_layer_docs)
m.add_layer(facilities_layer_docs)
m.add_layer(facilities_layer)
m.add_layer(discoveries_layer_docs)
m.add_layer(discoveries_layer)
m.add_layer(marker_cluster)
m.add_layer(marker_cluster2)
m.add_control(LayersControl())
m.add_control(FullScreenControl())
comments = []
for i in check_cols:
comments.append(create_layer(final_dataframe[final_dataframe[i] != "Empty"],
i, label_col='wlbWellbor', secondary_label_col=i, layer_type='marker',
color='green'))
m.add_layer(comments[-1])
m
| 54.125
| 121
| 0.617013
|
386169f4240758941050281b465346c03f52ba4a
| 360
|
py
|
Python
|
blog/migrations/0027_auto_20190416_1119.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-05-20T08:42:49.000Z
|
2020-05-20T08:42:49.000Z
|
blog/migrations/0027_auto_20190416_1119.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 14
|
2020-03-24T17:31:08.000Z
|
2022-03-11T23:59:30.000Z
|
blog/migrations/0027_auto_20190416_1119.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-04-13T12:37:37.000Z
|
2020-04-13T12:37:37.000Z
|
# Generated by Django 2.1.7 on 2019-04-16 10:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0026_auto_20190416_0712'),
]
operations = [
migrations.RenameField(
model_name='share',
old_name='user',
new_name='shared_by',
),
]
| 18.947368
| 47
| 0.580556
|
d77ef700171752c26fba6a3ccbc4cc05d0ff050e
| 1,389
|
py
|
Python
|
tests/unittest/test_index_routes.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | 1
|
2021-02-20T04:26:36.000Z
|
2021-02-20T04:26:36.000Z
|
tests/unittest/test_index_routes.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | 2
|
2021-04-06T18:14:38.000Z
|
2021-06-02T02:45:15.000Z
|
tests/unittest/test_index_routes.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | null | null | null |
import unittest
from app import app
class RenderTests(unittest.TestCase):
"""Test each route for proper render."""
def setUp(self):
app.config['TESTING'] = True
app.config['DEBUG'] = False
self.client = app.test_client()
def test_homepage(self):
"""Test root route."""
with self.client:
response = self.client.get('/')
self.assertIn(b'Welcome to ITS', response.data)
self.assertEqual(200, response.status_code)
def test_register_route(self):
"""Test /register route."""
with self.client:
response = self.client.get('/register')
self.assertIn(b'Register for an account', response.data)
self.assertEqual(200, response.status_code)
def test_login_route(self):
"""Test /login route."""
with self.client:
response = self.client.get('/login')
self.assertIn(b'Login', response.data)
self.assertEqual(200, response.status_code)
def test_logout_route(self):
"""Test /logout route."""
with self.client:
response = self.client.get('/logout')
self.assertEqual(302, response.status_code)
def test_404_route(self):
"""Test the 404 handler."""
with self.client:
response = self.client.get('/blargh')
self.assertIn(b'Nothing lives here...', response.data)
self.assertEqual(404, response.status_code)
if __name__ == "__main__":
unittest.main()
| 27.78
| 62
| 0.664507
|
92a5ba49623514f5b86bff370a5ad00cfa8edf72
| 1,735
|
py
|
Python
|
ckan/tests/legacy/models/test_misc.py
|
larrycameron80/ckan
|
fbab8f51b36a293206fdd998d71ece27d2565951
|
[
"Apache-2.0"
] | 1
|
2022-02-14T20:25:34.000Z
|
2022-02-14T20:25:34.000Z
|
ckan/tests/legacy/models/test_misc.py
|
larrycameron80/ckan
|
fbab8f51b36a293206fdd998d71ece27d2565951
|
[
"Apache-2.0"
] | 4
|
2020-03-24T17:53:23.000Z
|
2021-03-31T19:19:03.000Z
|
ckan/tests/legacy/models/test_misc.py
|
larrycameron80/ckan
|
fbab8f51b36a293206fdd998d71ece27d2565951
|
[
"Apache-2.0"
] | 3
|
2020-01-02T10:32:37.000Z
|
2021-12-22T07:20:21.000Z
|
# encoding: utf-8
from nose.tools import assert_equal
from ckan.tests.legacy import *
import ckan.model as model
from ckan.model.misc import escape_sql_like_special_characters
_sql_escape = escape_sql_like_special_characters
class TestEscapeSqlLikeCharacters(object):
"""
Tests for model.misc.escape_sql_like_special_characters
"""
def test_identity(self):
"""Asserts that it escapes nothing if nothing needs escaping"""
terms = ['',
'word',
'two words']
for term, expected_term in zip(terms, terms):
assert_equal(_sql_escape(term), expected_term)
    def test_escape_character_is_escaped(self):
"""Asserts that the escape character is escaped"""
term = r'backslash \ character'
assert_equal (_sql_escape(term, escape='\\'),
r'backslash \\ character')
term = 'surprise!'
assert_equal (_sql_escape(term, escape='!'),
r'surprise!!')
def test_default_escape_character_is_a_backslash(self):
"""Asserts that the default escape character is the backslash"""
term = r'backslash \ character'
assert_equal (_sql_escape(term),
r'backslash \\ character')
def test_sql_like_special_characters_are_escaped(self):
"""Asserts that '%' and '_' are escaped correctly"""
terms = [
(r'percents %', r'percents \%'),
(r'underscores _', r'underscores \_'),
(r'backslash \ ', r'backslash \\ '),
(r'all three \ _%', r'all three \\ \_\%'),
]
for term, expected_result in terms:
assert_equal(_sql_escape(term), expected_result)
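# Hedged reference sketch (assumption, not the actual ckan implementation): a function
# consistent with the behaviour asserted above must escape the escape character first,
# then '%' and '_', so the backslashes it introduces are not themselves re-escaped.
def _example_escape(term, escape='\\'):
    for special in (escape, '%', '_'):
        term = term.replace(special, escape + special)
    return term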
| 33.365385
| 72
| 0.613833
|
7f18e842218c9762d2c6664fa456f9d23f5f20ad
| 7,459
|
py
|
Python
|
fate_flow/apps/data_access_app.py
|
yzjba/FATE
|
9a6d252da637b2583a0f8a51f6cb4c615850bab9
|
[
"Apache-2.0"
] | 1
|
2021-05-31T16:39:30.000Z
|
2021-05-31T16:39:30.000Z
|
fate_flow/apps/data_access_app.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 9
|
2020-11-13T18:59:35.000Z
|
2022-02-10T02:13:58.000Z
|
fate_flow/apps/data_access_app.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
from flask import Flask, request
from arch.api import session
from fate_flow.manager.data_manager import query_data_view
from fate_flow.settings import stat_logger, USE_LOCAL_DATA, WORK_MODE
from fate_flow.utils.api_utils import get_json_result
from fate_flow.utils import detect_utils, job_utils, session_utils
from fate_flow.driver.job_controller import JobController
from fate_flow.utils.job_utils import get_job_configuration, generate_job_id, get_job_directory
from fate_flow.entity.runtime_config import RuntimeConfig
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
stat_logger.exception(e)
return get_json_result(retcode=100, retmsg=str(e))
@manager.route('/<access_module>', methods=['post'])
@session_utils.session_detect()
def download_upload(access_module):
job_id = generate_job_id()
if access_module == "upload" and USE_LOCAL_DATA and not (request.json and request.json.get("use_local_data") == 0):
file = request.files['file']
filename = os.path.join(get_job_directory(job_id), 'fate_upload_tmp', file.filename)
os.makedirs(os.path.dirname(filename), exist_ok=True)
try:
file.save(filename)
except Exception as e:
shutil.rmtree(os.path.join(get_job_directory(job_id), 'tmp'))
raise e
request_config = request.args.to_dict()
request_config['file'] = filename
else:
request_config = request.json
required_arguments = ['work_mode', 'namespace', 'table_name']
if access_module == 'upload':
required_arguments.extend(['file', 'head', 'partition'])
elif access_module == 'download':
required_arguments.extend(['output_path'])
else:
raise Exception('can not support this operating: {}'.format(access_module))
detect_utils.check_config(request_config, required_arguments=required_arguments)
data = {}
if access_module == "upload":
data['table_name'] = request_config["table_name"]
data['namespace'] = request_config["namespace"]
if WORK_MODE != 0:
data_table = session.get_data_table(name=request_config["table_name"], namespace=request_config["namespace"])
count = data_table.count()
if count and int(request_config.get('drop', 2)) == 2:
return get_json_result(retcode=100,
retmsg='The data table already exists, table data count:{}.'
'If you still want to continue uploading, please add the parameter -drop. '
'0 means not to delete and continue uploading, '
'1 means to upload again after deleting the table'.format(
count))
elif count and int(request_config.get('drop', 2)) == 1:
data_table.destroy()
job_dsl, job_runtime_conf = gen_data_access_job_config(request_config, access_module)
job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job(
{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id)
data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path,
'board_url': board_url, 'logs_directory': logs_directory})
return get_json_result(job_id=job_id, data=data)
@manager.route('/upload/history', methods=['POST'])
def upload_history():
data = get_upload_history()
return get_json_result(retcode=0, retmsg='success', data=data)
def get_upload_history():
request_data = request.json
if request_data.get('job_id'):
tasks = job_utils.query_task(component_name='upload_0', status='success', job_id=request_data.get('job_id'))
else:
tasks = job_utils.query_task(component_name='upload_0', status='success')
    limit = request_data.get('limit')
if not limit:
tasks = tasks[-1::-1]
else:
tasks = tasks[-1:-limit - 1:-1]
jobs_run_conf = get_job_configuration(None, None, None, tasks)
return get_upload_info(jobs_run_conf)
@session_utils.session_detect()
def get_upload_info(jobs_run_conf):
data = []
for job_id, job_run_conf in jobs_run_conf.items():
data_views = query_data_view(job_id=job_id, component_name='upload_0')[0]
info = {}
table_name = job_run_conf["table_name"][0]
namespace = job_run_conf["namespace"][0]
partition = job_run_conf["partition"][0]
info["upload_info"] = {
"table_name": table_name,
"namespace": namespace,
"partition": partition,
'upload_count': data_views.f_table_count_upload,
'actual_count': data_views.f_table_count_actual
}
info["notes"] = job_run_conf["notes"]
info["meta"] = session.get_data_table_metas(table_name, namespace)
data.append({job_id: info})
return data
def gen_data_access_job_config(config_data, access_module):
job_runtime_conf = {
"initiator": {},
"job_parameters": {},
"role": {},
"role_parameters": {}
}
initiator_role = "local"
initiator_party_id = 0
job_runtime_conf["initiator"]["role"] = initiator_role
job_runtime_conf["initiator"]["party_id"] = initiator_party_id
job_runtime_conf["job_parameters"]["work_mode"] = int(config_data["work_mode"])
job_runtime_conf["role"][initiator_role] = [initiator_party_id]
job_dsl = {
"components": {}
}
if access_module == 'upload':
job_runtime_conf["role_parameters"][initiator_role] = {
"upload_0": {
"work_mode": [int(config_data["work_mode"])],
"head": [int(config_data["head"])],
"partition": [int(config_data["partition"])],
"file": [config_data["file"]],
"namespace": [config_data["namespace"]],
"table_name": [config_data["table_name"]],
"in_version": [config_data.get("in_version")],
}
}
job_dsl["components"]["upload_0"] = {
"module": "Upload"
}
if access_module == 'download':
job_runtime_conf["role_parameters"][initiator_role] = {
"download_0": {
"work_mode": [config_data["work_mode"]],
"delimitor": [config_data.get("delimitor", ",")],
"output_path": [config_data["output_path"]],
"namespace": [config_data["namespace"]],
"table_name": [config_data["table_name"]]
}
}
job_dsl["components"]["download_0"] = {
"module": "Download"
}
return job_dsl, job_runtime_conf
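# Example sketch (assumption): for an upload request such as
#   gen_data_access_job_config({'work_mode': 0, 'head': 1, 'partition': 4,
#                               'file': '/data/table.csv', 'namespace': 'experiment',
#                               'table_name': 'table'}, 'upload')
# the returned DSL is {'components': {'upload_0': {'module': 'Upload'}}} and the runtime
# conf carries those values under role_parameters['local']['upload_0'] as one-element lists.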
| 40.983516
| 121
| 0.646333
|
029c719ef3a06da1e31964c4498882cc6ac0aca1
| 602
|
py
|
Python
|
9.classes/python_scopes_and_namespaces.py
|
yuishihara/python_tutorial
|
c88db5b1c002dcf69e183ed1a45c02a08aee905c
|
[
"MIT"
] | null | null | null |
9.classes/python_scopes_and_namespaces.py
|
yuishihara/python_tutorial
|
c88db5b1c002dcf69e183ed1a45c02a08aee905c
|
[
"MIT"
] | null | null | null |
9.classes/python_scopes_and_namespaces.py
|
yuishihara/python_tutorial
|
c88db5b1c002dcf69e183ed1a45c02a08aee905c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def scope_test():
def do_local():
spam = "local spam"
def do_nonlocal():
nonlocal spam
spam = "nonlocal spam"
def do_global():
global spam
spam = "global spam"
spam = "test spam"
do_local()
print("After local assignment:", spam)
do_nonlocal()
print("After nonlocal assignment:", spam)
do_global()
print("After global assignment:", spam)
def scopes_and_namespaces_example():
scope_test()
print("In global scope:", spam)
if __name__ == "__main__":
scopes_and_namespaces_example()
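# Expected output when run (this mirrors the standard Python tutorial example):
#   After local assignment: test spam
#   After nonlocal assignment: nonlocal spam
#   After global assignment: nonlocal spam
#   In global scope: global spam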
| 18.8125
| 45
| 0.619601
|
1850dd98502545a548ce0adafa21ba514e029703
| 634
|
py
|
Python
|
costar_task_plan/python/costar_task_plan/agent/ou_process.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 66
|
2018-10-31T04:58:53.000Z
|
2022-03-17T02:32:25.000Z
|
costar_task_plan/python/costar_task_plan/agent/ou_process.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 8
|
2018-10-23T21:19:25.000Z
|
2018-12-03T02:08:41.000Z
|
costar_task_plan/python/costar_task_plan/agent/ou_process.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 25
|
2018-10-19T00:54:17.000Z
|
2021-10-10T08:28:15.000Z
|
import numpy as np
import matplotlib.pyplot as plt
class OUProcess(object):
"""docstring for OUNoise"""
def __init__(self,action_dimension,mu=0, theta=0.15, sigma=0.1):
self.action_dimension = action_dimension
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dimension) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.action_dimension) * self.mu
def noise(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
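# Minimal usage sketch (assumption, not part of the original file): draw temporally
# correlated exploration noise for a 3-dimensional action space.
if __name__ == "__main__":
    ou = OUProcess(action_dimension=3)
    samples = np.array([ou.noise() for _ in range(5)])
    print(samples.shape)  # (5, 3); successive rows are correlated through self.state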
| 26.416667
| 77
| 0.62776
|
b0155a6aeff03b143edf0f8963e411e2b524a58b
| 6,044
|
py
|
Python
|
sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/aio/_event_hub_management_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/aio/_event_hub_management_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/aio/_event_hub_management_client.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 1
|
2019-04-05T18:17:43.000Z
|
2019-04-05T18:17:43.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import EventHubManagementClientConfiguration
from .operations import ClustersOperations
from .operations import NamespacesOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import ConfigurationOperations
from .operations import DisasterRecoveryConfigsOperations
from .operations import EventHubsOperations
from .operations import ConsumerGroupsOperations
from .operations import Operations
from .operations import RegionsOperations
from .. import models
class EventHubManagementClient(object):
"""Azure Event Hubs client for managing Event Hubs Cluster, IPFilter Rules and VirtualNetworkRules resources.
:ivar clusters: ClustersOperations operations
:vartype clusters: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.ClustersOperations
:ivar namespaces: NamespacesOperations operations
:vartype namespaces: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.NamespacesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.PrivateLinkResourcesOperations
:ivar configuration: ConfigurationOperations operations
:vartype configuration: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.ConfigurationOperations
:ivar disaster_recovery_configs: DisasterRecoveryConfigsOperations operations
:vartype disaster_recovery_configs: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.DisasterRecoveryConfigsOperations
:ivar event_hubs: EventHubsOperations operations
:vartype event_hubs: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.EventHubsOperations
:ivar consumer_groups: ConsumerGroupsOperations operations
:vartype consumer_groups: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.ConsumerGroupsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.Operations
:ivar regions: RegionsOperations operations
:vartype regions: azure.mgmt.eventhub.v2018_01_01_preview.aio.operations.RegionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = EventHubManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.clusters = ClustersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.namespaces = NamespacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.configuration = ConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.disaster_recovery_configs = DisasterRecoveryConfigsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.event_hubs = EventHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.consumer_groups = ConsumerGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.regions = RegionsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "EventHubManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
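# Illustrative usage sketch (not part of the generated SDK file): the client is designed to be
# used as an async context manager. DefaultAzureCredential and namespaces.list() follow the usual
# azure-identity / azure-mgmt-eventhub patterns and are assumptions here, not defined in this file.
#
# import asyncio
# from azure.identity.aio import DefaultAzureCredential
#
# async def _list_namespaces(subscription_id: str) -> None:
#     async with EventHubManagementClient(DefaultAzureCredential(), subscription_id) as client:
#         async for ns in client.namespaces.list():
#             print(ns.name)
#
# asyncio.run(_list_namespaces("<subscription-id>"))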
| 54.945455
| 173
| 0.759265
|
b23c6c503420c798fc685c8af2134bedd77113fe
| 883
|
py
|
Python
|
sentiment-analysis/log/log_config.py
|
arturgontijo/nlp-services
|
2bb3cef4aea4cd0687e984ef2414d5d2b8edc134
|
[
"MIT"
] | 15
|
2018-09-03T05:58:22.000Z
|
2020-07-01T04:52:49.000Z
|
sentiment-analysis/log/log_config.py
|
arturgontijo/nlp-services
|
2bb3cef4aea4cd0687e984ef2414d5d2b8edc134
|
[
"MIT"
] | 33
|
2018-08-08T17:55:11.000Z
|
2021-06-01T14:27:16.000Z
|
sentiment-analysis/log/log_config.py
|
arturgontijo/nlp-services
|
2bb3cef4aea4cd0687e984ef2414d5d2b8edc134
|
[
"MIT"
] | 25
|
2018-08-09T01:02:53.000Z
|
2020-12-20T05:09:21.000Z
|
# importing module
import logging
import logging.handlers
import sys
def getLogger(logger_name, test=None):
""" The method generates a logger instance to be reused.
:param logger_name: incoming logger name
:return: logger instance
"""
logger = logging.getLogger(str(logger_name))
log_level = logging.DEBUG
logger.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
filepath = 'log/service.log'
if test:
filepath = 'log/test.log'
fh = logging.handlers.RotatingFileHandler(filepath, maxBytes=104857600, backupCount=5)
fh.setLevel(log_level)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(log_level)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
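# Usage sketch (illustrative): assumes a relative 'log/' directory exists so the rotating
# file handler can create its file; run this module directly to try it.
if __name__ == "__main__":
    logger = getLogger("example")
    logger.info("service started")
    logger.debug("debug output goes to log/service.log and to stdout")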
| 25.228571
| 112
| 0.703284
|
f892b87e991d5888bc6cb7dd6dfddca42968f182
| 17,278
|
py
|
Python
|
simulator/ObdlRender.py
|
michaelyeah7/magics_mbrl
|
7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08
|
[
"MIT"
] | 2
|
2021-10-02T21:58:02.000Z
|
2022-03-23T15:34:46.000Z
|
simulator/ObdlRender.py
|
michaelyeah7/roblax
|
7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08
|
[
"MIT"
] | null | null | null |
simulator/ObdlRender.py
|
michaelyeah7/roblax
|
7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08
|
[
"MIT"
] | null | null | null |
import pybullet as p
import pybullet_data
import numpy as np
from simulator.UrdfUtils import matrix_to_rpy
from jbdl.rbdl.kinematics.calc_body_to_base_coordinates import calc_body_to_base_coordinates
from jbdl.rbdl.kinematics.transform_to_position import transform_to_position
import time
import math
import os
import jax.numpy as jnp
# from simulator.UrdfReader import URDF  # URDF.load() is used in ObdlRender.__init__; enable this
# import (or the jaxRBDL.Utils.UrdfReader import from the __main__ block below) when importing
# this module outside of script mode; the exact module path is an assumption.
from simulator.UrdfWrapper import UrdfWrapper
class RenderObject():
def __init__(self):
self.type = ""
self.origin = [0,0,0]
self.shape = [0,0,0]
self.parent_joint = -1
self.link_id = -1
self.body_id = -1
self.rgba = [0,0,0,0]
self.position = [0.0,0.0,0.0]
self.quat = [0,0,0,1]
self.link_name = ""
self.init_rpy = [0.0,0.0,0.0]
return
def assign_prop(self,shape_type,origin,size,parent_joint,link_id,rgba,scale=[1,1,1]):
self.type = shape_type
self.origin = np.asarray(origin)
self.shape = size # filename if mesh
if(self.type != 'mesh' and self.type != 'cylinder' and self.type != 'capsule' and self.type != 'sphere'):
self.shape = np.asarray(size) /2.0
self.parent_joint = parent_joint
self.link_id = link_id
self.rgba = rgba
self.scale = scale
return
def assign_id(self,b_id):
self.body_id = b_id
return
def assign_pose(self,pos,q):
self.position = pos
self.quat = q
def assign_name(self,name):
self.link_name = name
def assign_initQua(self,qua,rpy):
"""
this qua is from urdf link rpy
"""
self.init_qua = qua
self.init_rpy = rpy
class ObdlRender():
def __init__(self,model):
self.urdf_path = model["urdf_path"]
        self.robot = URDF.load(model["urdf_path"])  # requires the URDF reader import (see note near the top of the file)
self.model=model
self.p = p
#launch pybullet
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
self.plane_id = p.loadURDF("plane.urdf")
self.p.setTimeStep(1e-9) # for collision detect
#get mapping from model
self.id_mapping = dict()
#render
self.get_objects(self.robot)
self.create_objects()
    def create_objects(self):
        # placeholder: the pybullet bodies are actually created in get_objects() via create_visualshape()
        return
def get_objects(self,robot):
"""
get render object shape and local position, parents jointsID
render on the first time
"""
self.render_objects = []
NL = len(robot.links)
NJ = len(robot.joints)
self.NL = NL
        #get parents' joint ID according to the model
joint_orders = dict()
for i in range(len(self.model['jname'])):
_jname = self.model['jname'][i]
joint_orders[_jname] = i #0 is the root joint, not existing here
#get parents
parents = [0] * NL
name_dict = {}
for i in range(NL):
_n = robot.links[i].name
name_dict[_n] = i
for i in range(NJ):
_p, _c = robot.joints[i].parent,robot.joints[i].child
_pi, _ci = name_dict[_p],name_dict[_c]
_jname = robot.joints[i].name
parents[_ci] = joint_orders[_jname]#TODO error may happen here
# parents[_ci] = joint_orders[_jname]
#get shape and local position
_lid = 0
_model_lid = 1
_renderID = 0
current_q = [0.0]*self.model["NB"]
# self.rpy = np.zeros((3,))
self.rpys = np.zeros((NL,3))
self.quas = np.zeros((NL,4))
self.quas[:,-1] = 1.0
for l in robot.links:
visuals = l.visuals
for v in visuals:
_obj = RenderObject()
_pid = parents[_lid]
if(v.geometry.box):
box_size = v.geometry.box.size
box_origin = v.origin[:3,3] #matrix to xyz
box_color = [0.0,1.0,1.0,1.0]
if(v.material):
box_color = v.material.color
_obj.assign_prop("box",box_origin,box_size,_pid,_lid,box_color)
elif(v.geometry.cylinder):
cylinder_radius,cylinder_length = v.geometry.cylinder.radius,v.geometry.cylinder.length
cylinder_origin = v.origin[:3,3] #matrix to xyz
cylinder_color = [0.0,1.0,1.0,1.0]
if(v.material):
cylinder_color = v.material.color
_obj.assign_prop("cylinder",cylinder_origin,[cylinder_radius,cylinder_length],_pid,_lid,cylinder_color)
# elif(v.geometry.capsule):
# capsule_radius,capsule_length = v.geometry.capsule.radius,v.geometry.capsule.length
# capsule_origin = v.origin[:3,3] #matrix to xyz
# capsule_color = [0.0,1.0,1.0,1.0]
# if(v.material):
# capsule_color = v.material.color
# _obj.assign_prop("capsule",capsule_origin,[capsule_radius,capsule_length],_pid,_lid,capsule_color)
elif(v.geometry.sphere):
sphere_radius = v.geometry.sphere.radius
sphere_origin = v.origin[:3,3] #matrix to xyz
sphere_color = [0.0,1.0,1.0,1.0]
if(v.material):
sphere_color = v.material.color
_obj.assign_prop("sphere",sphere_origin,[sphere_radius],_pid,_lid,sphere_color)
elif(v.geometry.mesh):
mesh_name = os.path.dirname(self.urdf_path) + '/' + v.geometry.mesh.filename
mesh_origin = v.origin[:3,3] #matrix to xyz
mesh_scale = v.geometry.mesh.scale
mesh_color = [0.0,1.0,1.0,1.0] #doesn't matter
_obj.assign_prop("mesh",mesh_origin,[mesh_name],_pid,_lid,mesh_color,mesh_scale)
_p,_q = self.transform_pos(self.model,_obj,q=current_q)
_obj.assign_pose(_p,_q)
init_rpy = matrix_to_rpy(v.origin[:3,:3])
init_qua = self.p.getQuaternionFromEuler(init_rpy)
_obj.assign_initQua(init_qua,init_rpy)
bId = self.create_visualshape(target_obj=_obj)
_obj.assign_id(bId)
_obj.assign_name(l.name)
self.render_objects.append(_obj)
self.id_mapping[_model_lid] = _renderID
_renderID+=1
_lid+=1
_model_lid +=1
def create_visualshape(self,target_obj):
body_id = -1
vis_id = -1
if(target_obj.type == "box"):
vis_id = p.createVisualShape(p.GEOM_BOX, halfExtents=target_obj.shape,rgbaColor=target_obj.rgba,visualFrameOrientation=target_obj.init_qua)
col_id = p.createCollisionShape(p.GEOM_BOX, halfExtents=target_obj.shape)
elif(target_obj.type == "cylinder"):
vis_id = p.createVisualShape(p.GEOM_CYLINDER, radius=target_obj.shape[0],length=target_obj.shape[1], rgbaColor=target_obj.rgba,visualFrameOrientation=target_obj.init_qua)
col_id = p.createCollisionShape(p.GEOM_CYLINDER, radius=target_obj.shape[0],height=target_obj.shape[1])
elif(target_obj.type == "sphere"):
vis_id = p.createVisualShape(p.GEOM_SPHERE, radius=target_obj.shape[0],rgbaColor=target_obj.rgba,visualFrameOrientation=target_obj.init_qua)
col_id = p.createCollisionShape(p.GEOM_SPHERE, radius=target_obj.shape[0])
elif(target_obj.type == "capsule"):
vis_id = p.createVisualShape(p.GEOM_CAPSULE, radius=target_obj.shape[0],length=target_obj.shape[1],rgbaColor=target_obj.rgba,visualFrameOrientation=target_obj.init_qua)
col_id = p.createCollisionShape(p.GEOM_CAPSULE, radius=target_obj.shape[0],length=target_obj.shape[1])
elif(target_obj.type == "mesh"):
vis_id = p.createVisualShape(p.GEOM_MESH, fileName=target_obj.shape[0],meshScale=target_obj.scale,visualFrameOrientation=target_obj.init_qua)
col_id = p.createCollisionShape(p.GEOM_MESH, fileName=target_obj.shape[0],meshScale=target_obj.scale)
body_id = p.createMultiBody(baseMass=0.01, baseCollisionShapeIndex = col_id, baseVisualShapeIndex=vis_id, basePosition =target_obj.position,\
baseOrientation = target_obj.quat)
# body_id = p.createMultiBody(baseMass=0.01, baseVisualShapeIndex=vis_id, basePosition =target_obj.position,baseOrientation = target_obj.quat)
return body_id
def step_render(self,targetQ):
"""
render robot to the target joint angle
"""
self.rpys = np.zeros((3,))
# self.transform_rpy(self.model,targetQ)
self.transform_qua(self.model,targetQ)
n_obj = len(self.render_objects)
for i in range(n_obj):
if(self.render_objects[i].parent_joint == 0):
continue #TODO need discuss
pos,qua = self.transform_pos(self.model,self.render_objects[i],targetQ)
self.render_objects[i].assign_pose(pos,qua)
for _obj in self.render_objects:
self.move_obj(_obj)
return
def transform_rpy(self,model,q):
"""
transform the q to all rpy
"""
self.rpys = np.zeros((self.NL,3))
self.j_rpys = np.zeros((self.NL,3))
self.counted = np.zeros((self.NL,))
parent = np.array(self.model['parent'])
# print(parent)
#calc joint rpy
for i in range(self.NL):
_rpy = np.zeros((3,))
_pid = parent[i] -1
_rpy = np.array(self.j_rpys[_pid])
if(i == 0):
if(model['jtype'][0] == 0):
if(model['jaxis'][0]=='x'):
_rpy[0] = q[0]
elif(model['jaxis'][0]=='y'):
_rpy[1] = q[0]
elif(model['jaxis'][0]=='z'):
_rpy[2] = q[0]
else:
if(model['jtype'][i] == 0):
if(model['jaxis'][i]=='x'):
_rpy[0] = self.j_rpys[_pid][0] + q[i]
elif(model['jaxis'][i]=='y'):
_rpy[1] = self.j_rpys[_pid][1] + q[i]
elif(model['jaxis'][i]=='z'):
_rpy[2] = self.j_rpys[_pid][2] + q[i]
elif(model['jaxis'][i]=='a'):
_rpy[0] = self.j_rpys[_pid][0] - q[i]
elif(model['jaxis'][i]=='b'):
_rpy[1] = self.j_rpys[_pid][1] - q[i]
elif(model['jaxis'][i]=='c'):
_rpy[2] = self.j_rpys[_pid][2] - q[i]
# print("link",i,"parent",_pid,"angle",_rpy)
self.j_rpys[i] = _rpy
        #calc link's rpy, which is equal to its parent joint's rpy
self.rpys = self.j_rpys
# print("type",model['jtype'],"current_q",q,"rpys",self.rpys)
return
def transform_qua(self,model,q):
"""
transform the q to all rpy
"""
zeros_pos = np.zeros((3,))
self.quas = np.zeros((self.NL,4))
self.quas[:,-1] = 1.0
self.j_qua = np.zeros((self.NL,4))
self.j_qua[:,-1] = 1.0
parent = np.array(self.model['parent'])
# print(parent)
#calc joint rpy
for i in range(self.NL):
_pid = parent[i] -1
_qua = np.array([0.0,0.0,0.0,1.0])
_rpy = np.array([0.0,0.0,0.0])
if(i == 0):
if(model['jtype'][0] == 0):
if(model['jaxis'][0]=='x'):
_rpy = [q[0],0.0,0.0]
_qua = self.p.getQuaternionFromEuler (_rpy)
elif(model['jaxis'][0]=='y'):
_rpy = [0.0,q[0],0.0]
_qua = self.p.getQuaternionFromEuler (_rpy)
elif(model['jaxis'][0]=='z'):
_rpy = [0.0,0.0,q[0]]
_qua = self.p.getQuaternionFromEuler (_rpy)
else:
if(model['jtype'][i] == 0):
if(model['jaxis'][i]=='x'):
_rpy = [q[i],0.0,0.0]
elif(model['jaxis'][i]=='y'):
_rpy = [0.0,q[i],0.0]
elif(model['jaxis'][i]=='z'):
_rpy = [0.0,0.0,q[i]]
elif(model['jaxis'][i]=='a'):
_rpy = [-q[i],0.0,0.0]
elif(model['jaxis'][i]=='b'):
_rpy = [0.0,-q[i],0.0]
elif(model['jaxis'][i]=='c'):
_rpy = [0.0,0.0,-q[i]]
_qua = self.p.getQuaternionFromEuler (_rpy)
_pQua = np.array(self.j_qua[_pid])
_qua = self.p.multiplyTransforms(zeros_pos,_pQua,zeros_pos,_qua)[1]
# print("link",i,"parent",_pid,"qua:",_qua)
self.j_qua[i] = _qua
        #calc link's quaternion, which is equal to its parent joint's quaternion
# print(self.j_qua)
self.quas = self.j_qua
return
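    # Worked example of the composition above (hypothetical values): two successive 90-degree
    # rotations about z combine into a 180-degree rotation:
    #   q90 = p.getQuaternionFromEuler([0.0, 0.0, math.pi / 2])
    #   p.multiplyTransforms([0, 0, 0], q90, [0, 0, 0], q90)[1]  # approx (0, 0, 1, 0)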
def move_obj(self,_obj):
# p.resetBasePositionAndOrientation(_obj.body_id,_obj.position,(0.0,0.0,0.0,1.0))
p.resetBasePositionAndOrientation(_obj.body_id,_obj.position,_obj.quat)
return
def transform_pos(self,model,obj,q):
"""
obj: render object,calc rpy from q
q: current angle of all joints
"""
pos,qua = None,None
q = np.asarray(q)
local_pos = np.asarray(obj.origin).flatten()
_jid = obj.parent_joint +1 #TODO need discuss
input = (model, q, _jid, local_pos)
pos = calc_body_to_base_coordinates(*input)
_rid = obj.parent_joint
# rpy = np.array(self.rpys[_rid])
# qua = p.getQuaternionFromEuler(rpy)
qua = self.quas[_rid]
pos = np.asarray(pos).flatten()
qua = np.asarray(qua).flatten()
return pos,qua
def transform_pos_2(self,model,obj,q):
"""
obj: render object, calc rpy from rotation matrx
q: current angle of all joints
problem: matrix_to_rpy has several return
"""
pos,qua = None,None
q = np.asarray(q)
local_pos = np.asarray(obj.origin).flatten()
_jid = obj.parent_joint +1 #TODO need discuss
input = (model, q, _jid, local_pos)
        spatial_pos = CalcSpatialBodyToBaseCoordinates(*input)  # not imported in this module: legacy jaxRBDL helper that wraps CalcBodyToBaseCoordinates but returns the 6x6 spatial transform X0_point
        pos = transform_to_position(spatial_pos)  # snake_case helper imported at the top of the file
        rot_mat = spatial_pos[0:3, 0:3]
        rpy = -1 * matrix_to_rpy(rot_mat, solution=1)  # unclear why the -1 is needed
qua = p.getQuaternionFromEuler(rpy)
pos = np.asarray(pos).flatten()
qua = np.asarray(qua).flatten()
# print("link",obj.link_name,"parent joint",_jid, "pos",pos,"rpy",rpy)
return pos,qua
def get_poslist(self):
_res = []
for _obj in self.render_objects:
_res.append(_obj.position)
n = len(_res)
_res = np.reshape(np.asarray(_res),(n,3))
return _res
def check_collision(self,contact_ids):
self.p.stepSimulation() # for collision detect
cflags = [0.0] * len(contact_ids)
cpts = [np.zeros((3,))] * len(contact_ids)
n_id = len(contact_ids)
for i in range(n_id):
_contact_id = contact_ids[i]
_render_id = 0
if(_contact_id in self.id_mapping.keys()):
_render_id = self.id_mapping[_contact_id]
else:
continue
_lid = self.render_objects[_render_id].link_id
_info = self.p.getContactPoints(self.plane_id,_lid)
if(len(_info)>0):
cflags[i] = 2.0 #1.0
                cpts[i] = np.array(_info[0][6])  # alternative: np.array([0.0, -0.30, 0.0]); TODO important: local pos of leg endpoint in the last joint
return cflags,cpts
if __name__ == "__main__":
model = UrdfWrapper("/root/RBDL/urdf/arm.urdf").model
model["jtype"] = np.asarray(model["jtype"])
model["parent"] = np.asarray(model["parent"])
from jaxRBDL.Utils.UrdfReader import URDF
rder = ObdlRender(model)
# q = [0.1] * 7
# q = np.array([ 0.0, 0.0,np.random.uniform(-math.pi/2,math.pi/2), np.random.uniform(-math.pi/2,math.pi/2), np.random.uniform(-math.pi/2,math.pi/2), \
# np.random.uniform(-math.pi/2,math.pi/2),0.0])
# q = np.array([ 0.0, 0.0,0.0, np.random.uniform(-math.pi/2,math.pi/2), np.random.uniform(-math.pi/2,math.pi/2), \
# np.random.uniform(-math.pi/2,math.pi/2),0.0])
q = [ 0.0,0.0,0.5,1.10944034 ,-1.41440399, 1.55847655,0.]
print("target q:",q)
rder.step_render(q)
poslist = rder.get_poslist()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(np.asarray(poslist[:,0]), np.asarray(poslist[:,1]), np.asarray(poslist[:,2]), 'green',linewidth=7.5)
plt.show()
while(True):
time.sleep(0.5)
| 41.04038
| 182
| 0.549774
|
cec1de2a6639546d17fb7dd7eb09653aa22c391e
| 3,158
|
py
|
Python
|
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
src/freesound.py
|
lRomul/argus-birdsong
|
2290bd78f462cedc2ae143ec0b5e6e0782cd2b19
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from pathlib import Path
import multiprocessing as mp
from functools import partial
from src.audio import read_as_melspectrogram
from src.utils import get_params_hash
from src import config
NOISE_SOUNDS = [
'Buzz',
'Car_passing_by',
'Crackle',
'Cricket',
'Hiss',
'Mechanical_fan',
'Stream',
'Traffic_noise_and_roadway_noise',
'Walk_and_footsteps',
'Waves_and_surf',
'Crowd',
'Run',
'Female_speech_and_woman_speaking',
'Male_speech_and_man_speaking',
'Raindrop',
'Sink_(filling_or_washing)',
'Gurgling',
'Frying_(food)',
]
def check_noise(labels):
noise = True
for label in labels.split(','):
if label not in NOISE_SOUNDS:
noise = False
break
return noise
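# Quick examples for check_noise (hypothetical label strings, based on NOISE_SOUNDS above):
#   check_noise('Buzz,Hiss')  -> True   (every label is a noise class)
#   check_noise('Buzz,Bark')  -> False  (any label outside NOISE_SOUNDS disqualifies the clip)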
def make_spectrogram_and_save(file_path: Path, save_dir: Path, audio_params):
spec = read_as_melspectrogram(file_path, audio_params)
if spec.shape[1] >= 320:
save_dir.mkdir(parents=True, exist_ok=True)
save_path = save_dir / (file_path.name + '.npy')
np.save(save_path, spec)
def prepare_freesound_data(dir_path, audio_params):
dir_path = Path(dir_path)
file_path_lst = []
train_df = pd.read_csv(config.freesound_train_curated_csv_path)
for i, row in train_df.iterrows():
if check_noise(row.labels):
file_path = config.freesound_train_curated_dir / row.fname
file_path_lst.append(file_path)
train_df = pd.read_csv(config.freesound_train_noisy_csv_path)
for i, row in train_df.iterrows():
if check_noise(row.labels):
file_path = config.freesound_train_noisy_dir / row.fname
file_path_lst.append(file_path)
func = partial(make_spectrogram_and_save,
save_dir=dir_path, audio_params=audio_params)
with mp.Pool(mp.cpu_count()) as pool:
pool.map(func, file_path_lst)
def check_prepared_freesound_data(audio_params):
params_hash = get_params_hash({**audio_params.dict(),
'noise_sounds': NOISE_SOUNDS})
prepared_train_dir = config.freesound_prepared_train_curated_dir / params_hash
if not prepared_train_dir.exists():
print(f"Start preparing freesound dataset to '{prepared_train_dir}'")
prepare_freesound_data(prepared_train_dir, audio_params)
print(f"Dataset prepared.")
else:
print(f"'{prepared_train_dir}' already exists.")
def get_freesound_folds_data(audio_params):
params_hash = get_params_hash({**audio_params.dict(),
'noise_sounds': NOISE_SOUNDS})
prepared_train_dir = config.freesound_prepared_train_curated_dir / params_hash
folds_data = []
audio_paths = sorted(prepared_train_dir.glob("*.npy"))
for i, spec_path in enumerate(audio_paths):
sample = {
'ebird_code': 'nocall',
'spec_path': spec_path,
'fold': config.n_folds
}
folds_data.append(sample)
return folds_data
if __name__ == "__main__":
check_prepared_freesound_data(audio_params=config.audio)
| 30.07619
| 82
| 0.674478
|